source
stringlengths
3
92
c
stringlengths
26
2.25M
owl_aeos_tuner_map_impl.h
/*
 * OWL - OCaml Scientific and Engineering Computing
 * Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
 */

/* Macro-templated bigarray map kernels.  The including file must define:
 *   NUMBER / NUMBER1 / NUMBER2  - element types of the X / Y / Z bigarrays
 *   MAPFN                       - the per-element mapping expression/macro
 *   FUN4, BASE_FUN4, OMP_FUN4   - names for the serial/OpenMP 1-in-1-out map
 *   BASE_FUN15, OMP_FUN15       - names for the serial/OpenMP 3-array map
 * All template macros are #undef'ed at the end so the file can be included
 * repeatedly with different instantiations.
 */

#ifdef FUN4

/* Serial elementwise map: Y[i] = MAPFN(X[i]) for i in [0, N). */
CAMLprim value BASE_FUN4(value vN, value vX, value vY) {
  CAMLparam3(vN, vX, vY);
  int N = Long_val(vN);
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;

  NUMBER *start_x, *stop_x;
  NUMBER1 *start_y;

  /* Bigarray payloads live outside the OCaml heap, so it is safe to keep
   * using the raw pointers with the runtime lock released. */
  caml_release_runtime_system();  /* Allow other threads */

  start_x = X_data;
  stop_x = start_x + N;
  start_y = Y_data;

  while (start_x != stop_x) {
    NUMBER x = *start_x;
    *start_y = MAPFN(x);
    start_x += 1;
    start_y += 1;
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}

/* OpenMP variant of BASE_FUN4; same contract, parallel over i. */
CAMLprim value OMP_FUN4(value vN, value vX, value vY) {
  CAMLparam3(vN, vX, vY);
  int N = Long_val(vN);
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;

  /* (the unused stop_x of the serial version is not needed here) */
  NUMBER *start_x;
  NUMBER1 *start_y;

  caml_release_runtime_system();  /* Allow other threads */

  start_x = X_data;
  start_y = Y_data;

  #pragma omp parallel for schedule(static)
  for (int i = 0; i < N; i++) {
    NUMBER x = *(start_x + i);
    *(start_y + i) = (MAPFN(x));
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}

#endif /* FUN4 */

/* NOTE(review): unlike the FUN4 section this guards on BASE_FUN15, not FUN15;
 * kept as-is for compatibility with existing instantiation sites, but the
 * #endif comment now names the actual guard. */
#ifdef BASE_FUN15

/* Serial 3-array map: MAPFN(&X[i], &Y[i], &Z[i]) for i in [0, N). */
CAMLprim value BASE_FUN15(value vN, value vX, value vY, value vZ) {
  CAMLparam4(vN, vX, vY, vZ);
  int N = Long_val(vN);
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;
  struct caml_ba_array *Z = Caml_ba_array_val(vZ);
  NUMBER2 *Z_data = (NUMBER2 *) Z->data;

  NUMBER *start_x;
  NUMBER1 *start_y;
  NUMBER2 *start_z;

  caml_release_runtime_system();  /* Allow other threads */

  start_x = X_data;
  start_y = Y_data;
  start_z = Z_data;

  for (int i = 0; i < N; i++) {
    MAPFN((start_x + i), (start_y + i), (start_z + i));
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}

/* OpenMP variant of BASE_FUN15; MAPFN must be safe to run concurrently
 * on distinct indices. */
CAMLprim value OMP_FUN15(value vN, value vX, value vY, value vZ) {
  CAMLparam4(vN, vX, vY, vZ);
  int N = Long_val(vN);
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;
  struct caml_ba_array *Z = Caml_ba_array_val(vZ);
  NUMBER2 *Z_data = (NUMBER2 *) Z->data;

  NUMBER *start_x;
  NUMBER1 *start_y;
  NUMBER2 *start_z;

  caml_release_runtime_system();  /* Allow other threads */

  start_x = X_data;
  start_y = Y_data;
  start_z = Z_data;

  #pragma omp parallel for schedule(static)
  for (int i = 0; i < N; i++) {
    MAPFN((start_x + i), (start_y + i), (start_z + i));
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}

#endif /* BASE_FUN15 */

#undef NUMBER
#undef NUMBER1
#undef NUMBER2
#undef MAPFN
#undef FUN4
#undef FUN15
#undef OMP_FUN4
#undef OMP_FUN15
#undef BASE_FUN4
#undef BASE_FUN15
sparselevmarq.h
#ifndef ucoslam_SparseLevMarq_H #define ucoslam_SparseLevMarq_H #include <Eigen/Sparse> #include <functional> #include <iostream> #include <cmath> #include <omp.h> #include <ctime> #include <cstring> #include <vector> #include <chrono> #include <iomanip> namespace ucoslam{ //Sparse Levenberg-Marquardt method for general problems //Inspired in //@MISC\{IMM2004-03215, // author = "K. Madsen and H. B. Nielsen and O. Tingleff", // title = "Methods for Non-Linear Least Squares Problems (2nd ed.)", // year = "2004", // pages = "60", // publisher = "Informatics and Mathematical Modelling, Technical University of Denmark, {DTU}", // address = "Richard Petersens Plads, Building 321, {DK-}2800 Kgs. Lyngby", // url = "http://www.ltu.se/cms_fs/1.51590!/nonlinear_least_squares.pdf" //} template<typename T> class SparseLevMarq{ public: struct Params{ Params(){} Params(int _maxIters,T _minError,T _min_step_error_diff=0, T _min_average_step_error_diff=0.001 ,T _tau=1 ,T _der_epsilon=1e-3){ maxIters=_maxIters; minError=_minError; min_step_error_diff=_min_step_error_diff; min_average_step_error_diff=_min_step_error_diff; tau=_tau; der_epsilon=_der_epsilon; } int maxIters=100;//maximum number of iterations T minError=1e-5; //minimum error. Below this optimization stops T min_step_error_diff=0; //if error reduction between iterations is below this, optimization stops T min_average_step_error_diff=0.001; T tau=1 ; //indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first T der_epsilon=1e-3; //value employed for automatic differentiation bool cal_dev_parallel=true;//indicates if the compuation of derivatives is done in parallel. 
If so, the error function must be reentrant bool use_omp=true; bool verbose=false; }; typedef Eigen::Matrix<T,Eigen::Dynamic,1> eVector; typedef std::function<void(const eVector &, eVector &)> F_z_x; typedef std::function<void(const eVector &, Eigen::SparseMatrix<T> &)> F_z_J; SparseLevMarq(); /** * @brief setParams * @param maxIters maximum number of iterations of the algoritm * @param minError to stop the algorithm before reaching the max iterations * @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop. * @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first * @param der_epsilon increment to calculate the derivate of the evaluation function * step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations. */ void setParams(int maxIters,T minError,T min_step_error_diff=0,T tau=1 ,T der_epsilon=1e-3); void setParams(const Params &p); /** * @brief solve non linear minimization problem ||F(z)||, where F(z)=f(z) f(z)^t * @param z function params 1xP to be estimated. input-output. Contains the result of the optimization * @param f_z_x evaluation function f(z)=x * first parameter : z : input. Data is in T precision as a row vector (1xp) * second parameter : x : output. Data must be returned in T * @param f_J computes the jacobian of f(z) * first parameter : z : input. Data is in T precision as a row vector (1xp) * second parameter : J : output. 
Data must be returned in T * @return final error */ T solve( eVector &z, F_z_x , F_z_J)throw (std::exception); /// Step by step solve mode /** * @brief init initializes the search engine * @param z */ void init(eVector &z, F_z_x )throw (std::exception); /** * @brief step gives a step of the search * @param f_z_x error evaluation function * @param f_z_J Jacobian function * @return error of current solution */ bool step( F_z_x f_z_x , F_z_J f_z_J)throw (std::exception); bool step( F_z_x f_z_x)throw (std::exception); /** * @brief getCurrentSolution returns the current solution * @param z output * @return error of the solution */ T getCurrentSolution(eVector &z)throw (std::exception); /** * @brief getBestSolution sets in z the best solution up to this moment * @param z output * @return error of the solution */ T getBestSolution(eVector &z)throw (std::exception); /** Automatic jacobian estimation * @brief solve non linear minimization problem ||F(z)||, where F(z)=f(z) f(z)^t * @param z function params 1xP to be estimated. input-output. Contains the result of the optimization * @param f_z_x evaluation function f(z)=x * first parameter : z : input. Data is in T precision as a row vector (1xp) * second parameter : x : output. Data must be returned in T * @return final error */ T solve( eVector &z, F_z_x )throw (std::exception); //sets a callback func call at each step void setStepCallBackFunc(std::function<void(const eVector &)> callback){_step_callback=callback;} //sets a function that indicates when the algorithm must be stop. 
returns true if must stop and false otherwise void setStopFunction( std::function<bool(const eVector &)> stop_function){_stopFunction=stop_function;} void calcDerivates_omp(const eVector & z , Eigen::SparseMatrix<T> &sJ, F_z_x f_z_x); void calcDerivates(const eVector & z , Eigen::SparseMatrix<T> &sJ, F_z_x f_z_x); Params _params; private: //-------- eVector curr_z,x64; T currErr,prevErr,minErr ; Eigen::SparseMatrix<T> I,J; T mu,v; std::function<void(const eVector &)> _step_callback; std::function<bool(const eVector &)> _stopFunction; void add_missing_diagonal_elements( Eigen::SparseMatrix<T> &M)throw (std::exception); void get_diagonal_elements_refs_and_add( Eigen::SparseMatrix<T> &M,std::vector<T*> &d_refs,T add_val)throw (std::exception); void mult(const Eigen::SparseMatrix<T> &lhs, const Eigen::SparseMatrix<T> &rhs,Eigen::SparseMatrix<T> &res); }; template<typename T> SparseLevMarq<T>::SparseLevMarq(){ } /** * @brief setParams * @param maxIters maximum number of iterations of the algoritm * @param minError to stop the algorithm before reaching the max iterations * @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop. * @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first * @param der_epsilon increment to calculate the derivate of the evaluation function * step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations. 
*/ template<typename T> void SparseLevMarq<T>::setParams(const Params &p){ _params=p; } template<typename T> void SparseLevMarq<T>:: calcDerivates_omp(const eVector & z , Eigen::SparseMatrix<T> &sJ, F_z_x f_z_x) { std::vector< std::vector<Eigen::Triplet<T> > > sp_triplets(omp_get_max_threads()); #pragma omp parallel for for (int i=0;i<z.rows();i++) { eVector zp(z),zm(z); zp(i)+=_params.der_epsilon; zm(i)-=_params.der_epsilon; eVector xp,xm; f_z_x( zp,xp); f_z_x( zm,xm); eVector dif=(xp-xm)/(2.f*_params.der_epsilon); //add the non zero elementos int tidx=omp_get_thread_num(); for(int r=0;r<dif.rows();r++) if (fabs(dif(r))>1e-4) sp_triplets[tidx].push_back(Eigen::Triplet<T> (r,i,dif(r))); } //join all triplets int n=0; for(auto s:sp_triplets) n+=s.size(); std::vector<Eigen::Triplet<T> > sp_tripletsAll(n); int cidx=0; for(size_t i=0;i<sp_triplets.size();i++){ memcpy(&sp_tripletsAll[cidx],& sp_triplets[i][0],sizeof(Eigen::Triplet<T>)*sp_triplets[i].size() ); cidx+=sp_triplets[i].size() ; } sJ.setFromTriplets(sp_tripletsAll.begin(),sp_tripletsAll.end()); } template<typename T> void SparseLevMarq<T>:: calcDerivates(const eVector & z , Eigen::SparseMatrix<T> &sJ, F_z_x f_z_x) { std::vector<Eigen::Triplet<T> > sp_triplets; for (int i=0;i<z.rows();i++) { eVector zp(z),zm(z); zp(i)+=_params.der_epsilon; zm(i)-=_params.der_epsilon; eVector xp,xm; f_z_x( zp,xp); f_z_x( zm,xm); eVector dif=(xp-xm)/(2.f*_params.der_epsilon); //add the non zero elementos for(int r=0;r<dif.rows();r++) if (fabs(dif(r))>1e-4) sp_triplets.push_back(Eigen::Triplet<T> (r,i,dif(r))); } sJ.setFromTriplets(sp_triplets.begin(),sp_triplets.end()); } template<typename T> T SparseLevMarq<T>:: solve( eVector &z, F_z_x f_z_x)throw (std::exception){ if (_params.cal_dev_parallel && _params.use_omp) return solve(z,f_z_x,std::bind(&SparseLevMarq<T>::calcDerivates_omp,this,std::placeholders::_1,std::placeholders::_2,f_z_x)); else return 
solve(z,f_z_x,std::bind(&SparseLevMarq<T>::calcDerivates,this,std::placeholders::_1,std::placeholders::_2,f_z_x)); } template<typename T> bool SparseLevMarq<T>:: step( F_z_x f_z_x)throw (std::exception){ if (_params.cal_dev_parallel && _params.use_omp) return step(f_z_x,std::bind(&SparseLevMarq<T>::calcDerivates_omp,this,std::placeholders::_1,std::placeholders::_2,f_z_x)); else return step(f_z_x,std::bind(&SparseLevMarq<T>::calcDerivates,this,std::placeholders::_1,std::placeholders::_2,f_z_x)); } template<typename T> void SparseLevMarq<T>::init(eVector &z, F_z_x f_z_x )throw (std::exception){ curr_z=z; I.resize(z.rows(),z.rows()); I.setIdentity(); f_z_x(curr_z,x64); // std::cerr<<x64.transpose()<<std::endl; minErr=currErr=prevErr=x64.cwiseProduct(x64).sum(); J.resize(x64.rows(),z.rows()); mu=-1; } template<typename T> void SparseLevMarq<T>::get_diagonal_elements_refs_and_add( Eigen::SparseMatrix<T> &M,std::vector<T*> &refs,T add_val)throw (std::exception){ refs.resize(M.cols()); //now, get their references and add mu for (int k=0; k<M.outerSize(); ++k) for ( typename Eigen::SparseMatrix<T>::InnerIterator it(M,k); it; ++it) if (it.row()== it.col()) {refs[it.row()]= &it.valueRef(); *refs[it.row()]+=add_val;} } //parallel sparse matrix multiplication //modyfied by rafael muñoz salinas (rmsalinas@uco.es) to make it parallel template<typename T> void SparseLevMarq<T>::mult(const Eigen::SparseMatrix<T> &lhs, const Eigen::SparseMatrix<T> &rhs,Eigen::SparseMatrix<T> &res) { // make sure to call innerSize/outerSize since we fake the storage order. 
uint32_t rows = lhs.innerSize(); uint32_t cols = rhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); typedef typename std::map<uint32_t,T> RowVal; typedef typename std::pair<uint32_t,RowVal> Col_RowVal; //pair col-rowval typedef typename std::vector< Col_RowVal> Col_RowValSet; std::vector<Col_RowValSet>omp_container(omp_get_max_threads()); // we compute each column of the result, in parallel after the other #pragma omp parallel for for (int j=0; j<cols; ++j) { int tid=omp_get_thread_num(); omp_container[tid].push_back( std::make_pair(j,RowVal()) ); RowVal &row_val=omp_container[tid].back().second; for (typename Eigen::SparseMatrix<T>::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt) { T y = rhsIt.value(); uint32_t k = rhsIt.index(); //add all indices for (typename Eigen::SparseMatrix<T>::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt) { uint32_t i = lhsIt.index(); T x = lhsIt.value(); auto iter=row_val.find(i); if (iter==row_val.end()) row_val.insert(std::make_pair(i,x*y)); else iter->second+=x*y; } } } //finally, unordered insertion // unordered insertion typedef Eigen::SparseMatrix<T,Eigen::RowMajor,int32_t> RowMajorMatrix; typedef Eigen::SparseMatrix<T,Eigen::ColMajor,int32_t> ColMajorMatrix; ColMajorMatrix resCol(rows,cols); resCol.reserve( lhs.nonZeros() + rhs.nonZeros() ); for(auto & omp_thread:omp_container) for(auto c_r_v:omp_thread){//take each thread results int j=c_r_v.first;//column resCol.startVec(j); for(auto r_v:c_r_v.second)//for each column element, add it resCol.insertBackByOuterInnerUnordered(j,r_v.first) = r_v.second; } resCol.finalize(); RowMajorMatrix resRow(resCol); res = resRow; } template<typename T> void SparseLevMarq<T>::add_missing_diagonal_elements(Eigen::SparseMatrix<T> &M)throw (std::exception){ std::vector<bool> diag(M.rows(),false); for (int k=0; k<M.outerSize(); ++k) for ( typename Eigen::SparseMatrix<T>::InnerIterator it(M,k); it; ++it) if (it.row()== it.col()) diag[it.row()]=true; //and add them for(size_t 
i=0;i<diag.size();i++) if (!diag[i]) M.insert(i,i) =0; } //template<typename T> //uint64_t signature(Eigen::SparseMatrix<T> &sm){ // uint64_t sum=0; // for (int k=0; k<sm.outerSize(); ++k) // for (typename Eigen::SparseMatrix<T>::InnerIterator it(sm,k); it; ++it) // sum+=(it.value()*10000) + it.row() + it.col() +it.index(); // return sum; //} #define splm_get_time(a,b) std::chrono::duration_cast<std::chrono::duration<T>>(a-b).count() template<typename T> bool SparseLevMarq<T>::step( F_z_x f_z_x, F_z_J f_J)throw (std::exception){ auto t1= std::chrono::high_resolution_clock::now(); f_J(curr_z,J); auto t2= std::chrono::high_resolution_clock::now(); Eigen::SparseMatrix<T> Jt=J.transpose(); auto t22= std::chrono::high_resolution_clock::now(); Eigen::SparseMatrix<T> JtJ; if (_params.use_omp) mult(Jt,J, JtJ);//parallel sparse matrix multiplication else JtJ=Jt*J; auto t3= std::chrono::high_resolution_clock::now(); eVector B=-Jt*x64; auto t4= std::chrono::high_resolution_clock::now(); if(mu<0){//first time only T maxv=std::numeric_limits<T>::lowest(); for (int k=0; k<JtJ.outerSize(); ++k) for (typename Eigen::SparseMatrix<T>::InnerIterator it(JtJ,k); it; ++it) if (it.row()== it.col()) if (it.value()>maxv) maxv=it.value(); mu=maxv*_params.tau; } T gain=0,prev_mu=0; std::vector<T*> refs; int ntries=0; bool isStepAccepted=false; auto t6=std::chrono::high_resolution_clock::now(),t5=std::chrono::high_resolution_clock::now();; do{ //add dumping factor to JtJ. 
#if 1 //very efficient in any case, but particularly if initial dump does not produce improvement and must reenter if(refs.size()==0){//first time into the do add_missing_diagonal_elements(JtJ); get_diagonal_elements_refs_and_add(JtJ,refs,mu); } else for(size_t i=0;i<refs.size();i++) *refs[i]+= mu-prev_mu;//update mu prev_mu=mu; Eigen::SimplicialLDLT<Eigen::SparseMatrix<T> > chol(JtJ); // performs a Cholesky #else //less efficient, but easier to understand Eigen::SparseMatrix<T> A=JtJ+I*mu; Eigen::SimplicialLDLT<Eigen::SparseMatrix<T> > chol(A); // performs a Cholesky #endif t5= std::chrono::high_resolution_clock::now(); eVector delta= chol.solve(B); t6= std::chrono::high_resolution_clock::now(); eVector estimated_z=curr_z+delta; //compute error f_z_x(estimated_z,x64); auto err=x64.cwiseProduct(x64).sum(); auto L=0.5*delta.transpose()*((mu*delta) - B); gain= (err-prevErr)/ L(0,0) ; //get gain if (gain>0 && ((err-prevErr)<0)){ mu=mu*std::max(T(0.33),T(1.-pow(2*gain-1,3))); v=2.f; currErr=err; curr_z=estimated_z; isStepAccepted=true; } else{ mu=mu*v; v=v*5;} }while( gain<=0 && ntries++<5 && !isStepAccepted); if (_params.verbose) std::cout<<std::setprecision(5) <<"Curr Error="<<currErr<<" AErr(prev-curr)="<<(prevErr-currErr)/x64.rows()<<" gain="<<gain<<" dumping factor="<<mu<<std::endl; if (_params.verbose) {std::cerr<<" J="<<splm_get_time(t2,t1)<<" transpose="<< splm_get_time(t22,t2)<<" Jt*J="<< splm_get_time(t3,t22)<<" B="<< splm_get_time(t4,t3) <<" chol="<< splm_get_time(t6,t5) <<std::endl; // std::cerr<<"solve="<<T(t4-t3)/T(CLOCKS_PER_SEC)<<std::endl; } return isStepAccepted; } template<typename T> T SparseLevMarq<T>:: getCurrentSolution(eVector &z)throw (std::exception){ z=curr_z; return currErr; } template<typename T> T SparseLevMarq<T>::solve( eVector &z, F_z_x f_z_x, F_z_J f_J)throw (std::exception){ prevErr=std::numeric_limits<T>::max(); init(z,f_z_x); if( _stopFunction){ do{ step(f_z_x,f_J); if (_step_callback) _step_callback(curr_z); 
}while(!_stopFunction(curr_z)); } else{ //intial error estimation int mustExit=0; for ( int i = 0; i < _params.maxIters && !mustExit; i++ ) { if (_params.verbose)std::cerr<<"iteration "<<i<<"/"<<_params.maxIters<< " "; bool isStepAccepted=step(f_z_x,f_J); //check if we must exit if ( currErr<_params.minError ) mustExit=1; if( fabs(prevErr -currErr)<=_params.min_step_error_diff || fabs((prevErr-currErr)/x64.rows())<=_params.min_average_step_error_diff || !isStepAccepted) mustExit=2; //exit if error increment if (currErr>prevErr )mustExit=3; // if ( (prevErr-currErr) < 1e-5 ) mustExit=true; if (_step_callback) _step_callback(curr_z); prevErr=currErr; } // std::cout<<"Exit code="<<mustExit<<std::endl; } z=curr_z; return currErr; } } #endif
.body.c
#define S1(zT0,zT1,zT2,zT3,zT4,zT5,i,j,k) C[i][j]=beta*C[i][j]+alpha*A[i][k]*B[k][j]; int t0, t1, t2, t3, t4, t5, t6, t6t, newlb_t6, newub_t6, t7, t7t, newlb_t7, newub_t7, t8; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; /* Generated from PLUTO-produced CLooG file by CLooG v0.14.1 64 bits in 2.79s. */ lb1=0; ub1=floord(M-1,128); #pragma omp parallel for shared(lb1,ub1) private(t0,t1,t2,t3,t4,t5,t6,t7,t8) for (t0=lb1; t0<=ub1; t0++) { for (t1=0;t1<=floord(N-1,256);t1++) { for (t2=0;t2<=floord(K-1,128);t2++) { for (t3=max(0,16*t0);t3<=min(16*t0+15,floord(M-1,8));t3++) { for (t4=max(0,2*t1);t4<=min(2*t1+1,floord(N-1,128));t4++) { for (t5=max(16*t2,0);t5<=min(16*t2+15,floord(K-1,8));t5++) { /*@ begin Loop( transform RegTile(loops=['t6','t7'], ufactors=[8,8]) for (t6=max(8*t3,0);t6<=min(M-1,8*t3+7);t6++) for (t7=max(8*t5,0);t7<=min(K-1,8*t5+7);t7++) { { lbv=max(128*t4,0); ubv=min(N-1,128*t4+127); #pragma ivdep #pragma vector always for (t8=lbv; t8<=ubv; t8++) { S1(t0,t1,t2,t3,t4,t5,t6,t8,t7) ; } } } ) @*/{ for (t6t=max(8*t3,0); t6t<=min(M-1,8*t3+7)-7; t6t=t6t+8) { for (t7t=max(8*t5,0); t7t<=min(K-1,8*t5+7)-7; t7t=t7t+8) { { lbv=max(128*t4,0); ubv=min(N-1,128*t4+127); #pragma ivdep #pragma vector always for (t8=lbv; t8<=ubv; t8++) { S1(t0,t1,t2,t3,t4,t5,t6t,t8,t7t); S1(t0,t1,t2,t3,t4,t5,t6t,t8,(t7t+1)); S1(t0,t1,t2,t3,t4,t5,t6t,t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,t6t,t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,t6t,t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,t6t,t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,t6t,t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,t6t,t8,(t7t+7)); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,t7t); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,(t7t+1)); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,(t7t+7)); S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,t7t); S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,(t7t+1)); 
S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,(t7t+7)); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,t7t); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,(t7t+1)); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,(t7t+7)); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,t7t); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,(t7t+1)); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,(t7t+7)); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,t7t); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,(t7t+1)); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,(t7t+7)); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,t7t); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,(t7t+1)); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,(t7t+7)); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,t7t); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,(t7t+1)); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,(t7t+7)); } } } for (t7=t7t; t7<=min(K-1,8*t5+7); t7=t7+1) { { lbv=max(128*t4,0); 
ubv=min(N-1,128*t4+127); #pragma ivdep #pragma vector always for (t8=lbv; t8<=ubv; t8++) { S1(t0,t1,t2,t3,t4,t5,t6t,t8,t7); S1(t0,t1,t2,t3,t4,t5,(t6t+1),t8,t7); S1(t0,t1,t2,t3,t4,t5,(t6t+2),t8,t7); S1(t0,t1,t2,t3,t4,t5,(t6t+3),t8,t7); S1(t0,t1,t2,t3,t4,t5,(t6t+4),t8,t7); S1(t0,t1,t2,t3,t4,t5,(t6t+5),t8,t7); S1(t0,t1,t2,t3,t4,t5,(t6t+6),t8,t7); S1(t0,t1,t2,t3,t4,t5,(t6t+7),t8,t7); } } } } for (t6=t6t; t6<=min(M-1,8*t3+7); t6=t6+1) { for (t7t=max(8*t5,0); t7t<=min(K-1,8*t5+7)-7; t7t=t7t+8) { { lbv=max(128*t4,0); ubv=min(N-1,128*t4+127); #pragma ivdep #pragma vector always for (t8=lbv; t8<=ubv; t8++) { S1(t0,t1,t2,t3,t4,t5,t6,t8,t7t); S1(t0,t1,t2,t3,t4,t5,t6,t8,(t7t+1)); S1(t0,t1,t2,t3,t4,t5,t6,t8,(t7t+2)); S1(t0,t1,t2,t3,t4,t5,t6,t8,(t7t+3)); S1(t0,t1,t2,t3,t4,t5,t6,t8,(t7t+4)); S1(t0,t1,t2,t3,t4,t5,t6,t8,(t7t+5)); S1(t0,t1,t2,t3,t4,t5,t6,t8,(t7t+6)); S1(t0,t1,t2,t3,t4,t5,t6,t8,(t7t+7)); } } } for (t7=t7t; t7<=min(K-1,8*t5+7); t7=t7+1) { { lbv=max(128*t4,0); ubv=min(N-1,128*t4+127); #pragma ivdep #pragma vector always for (t8=lbv; t8<=ubv; t8++) { S1(t0,t1,t2,t3,t4,t5,t6,t8,t7); } } } } } /*@ end @*/ } } } } } } /* End of CLooG code */
SoaDistanceTableAA.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. // Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp. // // File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. ////////////////////////////////////////////////////////////////////////////////////// // -*- C++ -*- #ifndef QMCPLUSPLUS_DTDIMPL_AA_H #define QMCPLUSPLUS_DTDIMPL_AA_H #include "simd/algorithm.hpp" namespace qmcplusplus { /**@ingroup nnlist * @brief A derived classe from DistacneTableData, specialized for dense case */ template<typename T, unsigned D, int SC> struct SoaDistanceTableAA: public DTD_BConds<T,D,SC>, public DistanceTableData { int Ntargets; int Ntargets_padded; SoaDistanceTableAA(ParticleSet& target) : DTD_BConds<T,D,SC>(target.Lattice), DistanceTableData(target,target) { resize(target.getTotalNum()); } #if (__cplusplus >= 201103L) SoaDistanceTableAA()=delete; SoaDistanceTableAA(const SoaDistanceTableAA&)=delete; #endif ~SoaDistanceTableAA() {} size_t compute_size(int N) { const size_t N_padded = getAlignedSize<T>(N); const size_t Alignment = getAlignment<T>(); return (N_padded*(2*N-N_padded+1)+(Alignment-1)*N_padded)/2; } void resize(int n) { N[SourceIndex]=N[VisitorIndex]=Ntargets=n; Ntargets_padded=getAlignedSize<T>(n); Distances.resize(Ntargets,Ntargets_padded); const size_t total_size = compute_size(Ntargets); memoryPool.resize(total_size*D); Displacements.resize(Ntargets); for(int i=0; i<Ntargets; ++i) Displacements[i].attachReference(i,total_size,memoryPool.data()+compute_size(i)); Temp_r.resize(Ntargets); Temp_dr.resize(Ntargets); } inline void evaluate(ParticleSet& P) { CONSTEXPR T BigR= std::numeric_limits<T>::max(); //P.RSoA.copyIn(P.R); for(int iat=0; 
iat<Ntargets; ++iat) { DTD_BConds<T,D,SC>::computeDistances(P.R[iat], P.RSoA, Distances[iat], Displacements[iat], 0, Ntargets, iat); Distances[iat][iat]=BigR; //assign big distance } } inline void evaluate(ParticleSet& P, IndexType jat) { DTD_BConds<T,D,SC>::computeDistances(P.R[jat], P.RSoA, Distances[jat], Displacements[jat], 0, Ntargets, jat); Distances[jat][jat]=std::numeric_limits<T>::max(); //assign a big number } inline void moveOnSphere(const ParticleSet& P, const PosType& rnew) { DTD_BConds<T,D,SC>::computeDistances(rnew, P.RSoA, Temp_r.data(),Temp_dr, 0, Ntargets, P.activePtcl); } ///evaluate the temporary pair relations inline void move(const ParticleSet& P, const PosType& rnew) { //#pragma omp master moveOnSphere(P,rnew); } int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const { RealType min_dist = std::numeric_limits<RealType>::max(); int index=-1; if(newpos) { for(int jat=0; jat<Ntargets; ++jat) if(Temp_r[jat]<min_dist && jat!=iat) { min_dist = Temp_r[jat]; index = jat; } if(index>=0) dr=Temp_dr[index]; } else { for(int jat=0; jat<Ntargets; ++jat) if(Distances[iat][jat]<min_dist && jat!=iat) { min_dist = Distances[iat][jat]; index = jat; } if(index>=0) dr=Displacements[iat][index]; } r=min_dist; return index; } ///update the iat-th row for iat=[0,iat-1) inline void update(IndexType iat) { if(iat==0) return; //update by a cache line const int nupdate=getAlignedSize<T>(iat); simd::copy_n(Temp_r.data(),nupdate,Distances[iat]); for(int idim=0;idim<D; ++idim) simd::copy_n(Temp_dr.data(idim),nupdate,Displacements[iat].data(idim)); } }; } #endif
Example_SIMD.7.c
/*
 * @@name:       SIMD.7c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   yes
 * @@expect:     success
 * @@version:    omp_4.0
 */
#include <stdio.h>
#include <stdlib.h>

#define N 45
int a[N], b[N], c[N];

/* Recursive Fibonacci, declared as a SIMD function so that it may be
 * vectorized when called from a simd loop (masked variant: inbranch). */
#pragma omp declare simd inbranch
int fib(int n)
{
   if (n <= 1)
      return n;
   return fib(n - 1) + fib(n - 2);
}

int main(void)
{
   int i;

   /* fill the argument array ... */
   #pragma omp simd
   for (i = 0; i < N; i++)
      b[i] = i;

   /* ... and compute fib over it with a vectorized loop */
   #pragma omp simd
   for (i = 0; i < N; i++)
      a[i] = fib(b[i]);

   printf("Done a[%d] = %d\n", N-1, a[N-1]);
   return 0;
}
r_ao2mo.c
/*
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 *
 */

#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <math.h>
#include <assert.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "np_helper/np_helper.h"
#include "vhf/cvhf.h"
#include "vhf/fblas.h"
#include "vhf/nr_direct.h"
#include "r_ao2mo.h"

#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
#define NCTRMAX 128

/*
 * s1-AO integrals to s1-MO integrals, efficient for i_count < j_count
 * shape requirements:
 *      vout[:,bra_count*ket_count], eri[:,nao*nao]
 * s1, s2 here to label the AO symmetry
 *
 * The two half-transformations below use the Gauss trick: a complex
 * matrix product is formed from three real dgemm calls on the real part,
 * imaginary part and their sum, instead of four.  The buffers are
 * updated in place afterwards (bufr/bufi alias buf3/buf2), so the order
 * of the loops below is significant.
 * NOTE(review): malloc return values are not checked here - TODO confirm
 * callers guarantee small enough block sizes.
 */
int AO2MOmmm_r_iltj(double complex *vout, double complex *eri,
                    struct _AO2MOEnvs *envs, int seekdim)
{
        /* seekdim 1/2 are size queries, not a transformation */
        switch (seekdim) {
                case 1: return envs->bra_count * envs->ket_count;
                case 2: return envs->nao * envs->nao;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int n2c = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        int i;
        double *buf1 = malloc(sizeof(double)*n2c*i_count*3);
        double *buf2 = buf1 + n2c*i_count;
        double *buf3 = buf2 + n2c*i_count;
        double *bufr, *bufi;
        double *mo1 = malloc(sizeof(double) * n2c*MAX(i_count,j_count)*2);
        double *mo2, *mo_r, *mo_i;
        double *eri_r = malloc(sizeof(double) * n2c*n2c*3);
        double *eri_i = eri_r + n2c*n2c;
        double *eri1 = eri_i + n2c*n2c;
        double *vout1, *vout2, *vout3;

        // Gauss complex multiplication, C_pi^* (pq| = (iq|, where (pq| is in C-order
        mo_r = envs->mo_r + i_start * n2c;
        mo_i = envs->mo_i + i_start * n2c;
        mo2 = mo1 + n2c*i_count;
        for (i = 0; i < n2c*i_count; i++) {
                mo1[i] = mo_r[i] - mo_i[i];
                mo2[i] =-mo_i[i] - mo_r[i];
        }
        /* split eri into real / imag / real+imag parts for the 3 dgemm's */
        for (i = 0; i < n2c*n2c; i++) {
                eri_r[i] = creal(eri[i]);
                eri_i[i] = cimag(eri[i]);
                eri1 [i] = eri_r[i] + eri_i[i];
        }
        dgemm_(&TRANS_N, &TRANS_N, &n2c, &i_count, &n2c,
               &D1, eri1, &n2c, mo_r, &n2c, &D0, buf1, &n2c);
        dgemm_(&TRANS_N, &TRANS_N, &n2c, &i_count, &n2c,
               &D1, eri_r, &n2c, mo2, &n2c, &D0, buf2, &n2c);
        dgemm_(&TRANS_N, &TRANS_N, &n2c, &i_count, &n2c,
               &D1, eri_i, &n2c, mo1, &n2c, &D0, buf3, &n2c);
        free(eri_r);

        // C_qj^* (iq| = (ij|
        /* bufr/bufi alias buf3/buf2: the in-place updates below turn them
         * into the real and imaginary parts of the half-transformed block */
        bufr = buf3;
        bufi = buf2;
        for (i = 0; i < n2c*i_count; i++) {
                buf3[i] = buf1[i] - buf3[i];
                buf2[i] = buf1[i] + buf2[i];
        }
        for (i = 0; i < n2c*i_count; i++) {
                buf1[i] = bufr[i] + bufi[i];
        }
        mo_r = envs->mo_r + j_start * n2c;
        mo_i = envs->mo_i + j_start * n2c;
        mo2 = mo1 + n2c*j_count;
        for (i = 0; i < n2c*j_count; i++) {
                mo1[i] = mo_r[i] + mo_i[i];
                mo2[i] = mo_i[i] - mo_r[i];
        }
        vout1 = malloc(sizeof(double)*i_count*j_count*3);
        vout2 = vout1 + i_count * j_count;
        vout3 = vout2 + i_count * j_count;
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, mo_r, &n2c, buf1, &n2c, &D0, vout1, &j_count);
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, mo2, &n2c, bufr, &n2c, &D0, vout2, &j_count);
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, mo1, &n2c, bufi, &n2c, &D0, vout3, &j_count);
        /* recombine the three real products into the complex result */
        for (i = 0; i < i_count*j_count; i++) {
                vout[i] = (vout1[i]-vout3[i]) + (vout1[i]+vout2[i])*_Complex_I;
        }
        free(vout1);
        free(buf1);
        free(mo1);
        return 0;
}

/* s1 alias of AO2MOmmm_r_iltj (no AO symmetry) */
int AO2MOmmm_r_s1_iltj(double complex *vout, double complex *eri,
                       struct _AO2MOEnvs *envs, int seekdim)
{
        return AO2MOmmm_r_iltj(vout, eri, envs, seekdim);
}

/*
 * s1-AO integrals to s1-MO integrals, efficient for i_count > j_count
 * shape requirements:
 *      vout[:,bra_count*ket_count], eri[:,nao*nao]
 * Same Gauss-trick structure as AO2MOmmm_r_iltj, but the ket side is
 * transformed first.
 */
int AO2MOmmm_r_igtj(double complex *vout, double complex *eri,
                    struct _AO2MOEnvs *envs, int seekdim)
{
        switch (seekdim) {
                case 1: return envs->bra_count * envs->ket_count;
                case 2: return envs->nao * envs->nao;
        }
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        int n2c = envs->nao;
        int i_start = envs->bra_start;
        int i_count = envs->bra_count;
        int j_start = envs->ket_start;
        int j_count = envs->ket_count;
        int i;
        double *buf1 = malloc(sizeof(double)*n2c*j_count*3);
        double *buf2 = buf1 + n2c*j_count;
        double *buf3 = buf2 + n2c*j_count;
        double *bufr, *bufi;
        double *mo1 = malloc(sizeof(double) * n2c*MAX(i_count,j_count)*2);
        double *mo2, *mo_r, *mo_i;
        double *eri_r = malloc(sizeof(double) * n2c*n2c*3);
        double *eri_i = eri_r + n2c*n2c;
        double *eri1 = eri_i + n2c*n2c;
        double *vout1, *vout2, *vout3;

        // Gauss complex multiplication, C_qj (pq| = (pj|, where (pq| is in C-order
        for (i = 0; i < n2c*n2c; i++) {
                eri_r[i] = creal(eri[i]);
                eri_i[i] = cimag(eri[i]);
                eri1 [i] = eri_r[i] + eri_i[i];
        }
        mo_r = envs->mo_r + j_start * n2c;
        mo_i = envs->mo_i + j_start * n2c;
        mo2 = mo1 + n2c*j_count;
        for (i = 0; i < n2c*j_count; i++) {
                mo1[i] = mo_r[i] + mo_i[i];
                mo2[i] = mo_i[i] - mo_r[i];
        }
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &n2c, &n2c,
               &D1, mo_r, &n2c, eri1, &n2c, &D0, buf1, &j_count);
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &n2c, &n2c,
               &D1, mo2, &n2c, eri_r, &n2c, &D0, buf2, &j_count);
        dgemm_(&TRANS_T, &TRANS_N, &j_count, &n2c, &n2c,
               &D1, mo1, &n2c, eri_i, &n2c, &D0, buf3, &j_count);
        free(eri_r);

        /* in-place recombination; bufr/bufi alias buf3/buf2 (order matters) */
        bufr = buf3;
        bufi = buf2;
        for (i = 0; i < n2c*j_count; i++) {
                buf3[i] = buf1[i] - buf3[i];
                buf2[i] = buf1[i] + buf2[i];
        }
        for (i = 0; i < n2c*j_count; i++) {
                buf1[i] = bufr[i] + bufi[i];
        }
        mo_r = envs->mo_r + i_start * n2c;
        mo_i = envs->mo_i + i_start * n2c;
        mo2 = mo1 + n2c*i_count;
        for (i = 0; i < n2c*i_count; i++) {
                mo1[i] = mo_r[i] - mo_i[i];
                mo2[i] =-mo_i[i] - mo_r[i];
        }
        vout1 = malloc(sizeof(double)*i_count*j_count*3);
        vout2 = vout1 + i_count * j_count;
        vout3 = vout2 + i_count * j_count;
        dgemm_(&TRANS_N, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, buf1, &j_count, mo_r, &n2c, &D0, vout1, &j_count);
        dgemm_(&TRANS_N, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, bufr, &j_count, mo2, &n2c, &D0, vout2, &j_count);
        dgemm_(&TRANS_N, &TRANS_N, &j_count, &i_count, &n2c,
               &D1, bufi, &j_count, mo1, &n2c, &D0, vout3, &j_count);
        /* recombine the three real products into the complex result */
        for (i = 0; i < i_count*j_count; i++) {
                vout[i] = (vout1[i]-vout3[i]) + (vout1[i]+vout2[i])*_Complex_I;
        }
        free(vout1);
        free(buf1);
        free(mo1);
        return 0;
}

/* s1 alias of AO2MOmmm_r_igtj (no AO symmetry) */
int AO2MOmmm_r_s1_igtj(double complex *vout, double complex *eri,
                       struct _AO2MOEnvs *envs, int seekdim)
{
        return AO2MOmmm_r_igtj(vout, eri, envs, seekdim);
}

/*
 * s1, s2ij, s2kl, s4 here to label the AO symmetry
 *      eris[ncomp,nkl,nao*nao]
 * Fills the (i,j) AO block of eri for each (k,l) shell pair handled by
 * this worker, calling the integral engine per shell quartet and
 * transposing the engine's (j,i) layout into (i,j).
 */
static void fill_s1(int (*intor)(), int (*fprescreen)(),
                    double complex *eri, int nkl, int ish, int jshtot,
                    struct _AO2MOEnvs *envs)
{
        const int nao = envs->nao;
        const size_t nao2 = nao * nao;
        const int *ao_loc = envs->ao_loc;
        const int klsh_start = envs->klsh_start;
        const int klsh_end = klsh_start + envs->klsh_count;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        int kl, jsh, ksh, lsh, dj, dk, dl;
        int icomp, i, j, k, l, n;
        int shls[4];
        double complex *buf = malloc(sizeof(double complex)
                                     *di*nao*NCTRMAX*NCTRMAX*envs->ncomp);
        assert(buf);
        double complex *pbuf, *pbuf1, *peri;
        shls[0] = ish;

        for (kl = klsh_start; kl < klsh_end; kl++) {
                // kl = k * (k+1) / 2 + l
                ksh = kl / envs->nbas;
                lsh = kl - ksh * envs->nbas;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                shls[2] = ksh;
                shls[3] = lsh;

                /* pass 1: compute (or zero-fill) all jsh blocks into buf */
                pbuf = buf;
                for (jsh = 0; jsh < jshtot; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        shls[1] = jsh;
                        n = di * dj * dk * dl * envs->ncomp;
                        if ((*fprescreen)(shls, envs->vhfopt,
                                          envs->atm, envs->bas, envs->env)) {
                                (*intor)(pbuf, NULL, shls, envs->atm, envs->natm,
                                         envs->bas, envs->nbas, envs->env,
                                         envs->cintopt, NULL);
                        } else {
                                /* screened out: block is exactly zero */
                                memset(pbuf, 0, sizeof(double complex)*n);
                        }
                        pbuf += n;
                }

                /* pass 2: scatter buf into eri, transposing (j,i) -> (i,j) */
                pbuf = buf;
                for (jsh = 0; jsh < jshtot; jsh++) {
                        dj = ao_loc[jsh+1] - ao_loc[jsh];
                        for (icomp = 0; icomp < envs->ncomp; icomp++) {
                                peri = eri + nao2 * nkl * icomp
                                     + ao_loc[ish] * nao + ao_loc[jsh];
                                for (k = 0; k < dk; k++) {
                                for (l = 0; l < dl; l++) {
                                        pbuf1 = pbuf + di * dj * (l*dk+k);
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri[i*nao+j] = pbuf1[j*di+i];
                                        } }
                                        peri += nao2;
                                } }
                                pbuf += di * dj * dk * dl;
                        }
                }
                eri += nao2 * dk * dl;
        }
        free(buf);
}

/* (definition continues beyond this chunk) */
static void fill_s2(int (*intor)(), int (*fprescreen)(),
                    double complex *eri, int nkl, int ish, int jshtot,
                    struct _AO2MOEnvs *envs)
{
const int nao = envs->nao; const size_t nao2 = nao * nao; const int *ao_loc = envs->ao_loc; const int klsh_start = envs->klsh_start; const int klsh_end = klsh_start + envs->klsh_count; const int di = ao_loc[ish+1] - ao_loc[ish]; int kl, jsh, ksh, lsh, dj, dk, dl; int icomp, i, j, k, l, n; int shls[4]; double complex *buf = malloc(sizeof(double complex) *di*nao*nkl*envs->ncomp); assert(buf); double complex *pbuf, *pbuf1, *peri; shls[0] = ish; for (kl = klsh_start; kl < klsh_end; kl++) { // kl = k * (k+1) / 2 + l ksh = (int)(sqrt(2*kl+.25) - .5 + 1e-7); lsh = kl - ksh * (ksh+1) / 2; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; shls[2] = ksh; shls[3] = lsh; pbuf = buf; for (jsh = 0; jsh < jshtot; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; shls[1] = jsh; n = di * dj * dk * dl * envs->ncomp; if ((*fprescreen)(shls, envs->vhfopt, envs->atm, envs->bas, envs->env)) { (*intor)(pbuf, NULL, shls, envs->atm, envs->natm, envs->bas, envs->nbas, envs->env, envs->cintopt, NULL); } else { memset(pbuf, 0, sizeof(double complex)*n); } pbuf += n; } pbuf = buf; for (jsh = 0; jsh < jshtot; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; for (icomp = 0; icomp < envs->ncomp; icomp++) { peri = eri + nao2 * nkl * icomp + ao_loc[ish] * nao + ao_loc[jsh]; for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { pbuf1 = pbuf + di * dj * (l*dk+k); for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri[i*nao+j] = pbuf1[j*di+i]; } } peri += nao2; } } pbuf += di * dj * dk * dl; } } eri += nao2 * dk * dl; } free(buf); } void AO2MOfill_r_s1(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, struct _AO2MOEnvs *envs) { fill_s1(intor, fprescreen, eri, nkl, ish, envs->nbas, envs); } void AO2MOfill_r_s2ij(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, struct _AO2MOEnvs *envs) { fill_s1(intor, fprescreen, eri, nkl, ish, ish+1, envs); } void AO2MOfill_r_s2kl(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, 
struct _AO2MOEnvs *envs) { fill_s2(intor, fprescreen, eri, nkl, ish, envs->nbas, envs); } void AO2MOfill_r_s4(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, struct _AO2MOEnvs *envs) { fill_s2(intor, fprescreen, eri, nkl, ish, ish+1, envs); } void AO2MOfill_r_a2ij(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, struct _AO2MOEnvs *envs) { fill_s1(intor, fprescreen, eri, nkl, ish, ish+1, envs); } void AO2MOfill_r_a2kl(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, struct _AO2MOEnvs *envs) { fill_s2(intor, fprescreen, eri, nkl, ish, envs->nbas, envs); } void AO2MOfill_r_a4ij(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, struct _AO2MOEnvs *envs) { fill_s2(intor, fprescreen, eri, nkl, ish, ish+1, envs); } void AO2MOfill_r_a4kl(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, struct _AO2MOEnvs *envs) { fill_s2(intor, fprescreen, eri, nkl, ish, ish+1, envs); } void AO2MOfill_r_a4(int (*intor)(), int (*fprescreen)(), double complex *eri, int nkl, int ish, struct _AO2MOEnvs *envs) { fill_s2(intor, fprescreen, eri, nkl, ish, ish+1, envs); } /* * time reversal symmetry for AOs * tao index start from 1 */ #define BeginTimeRevLoop(I, J) \ for (I##0 = I##start; I##0 < I##end;) { \ I##1 = abs(tao[I##0]); \ for (J##0 = J##start; J##0 < J##end;) { \ J##1 = abs(tao[J##0]); #define EndTimeRevLoop(I, J) \ J##0 = J##1; } \ I##0 = I##1; } static void timerev_mat(double complex *mat, int *tao, int *ao_loc, int nbas) { int nao = ao_loc[nbas]; int ish, jsh, istart, iend, jstart, jend; int i, j, i0, j0, i1, j1; double complex *pmat, *pmat1, *pbuf, *pbuf1; for (ish = 0; ish < nbas; ish++) { for (jsh = 0; jsh < ish; jsh++) { istart = ao_loc[ish ]; iend = ao_loc[ish+1]; jstart = ao_loc[jsh ]; jend = ao_loc[jsh+1]; if ((tao[jstart]<0) == (tao[istart]<0)) { BeginTimeRevLoop(i, j); pbuf = mat + j0 * nao + i0; pbuf1 = pbuf + nao; pmat = mat + (i1-1)*nao + 
(j1-1); pmat1 = pmat - nao; for (j = 0; j < j1-j0; j+=2) { for (i = 0; i < i1-i0; i+=2) { pbuf [j*nao+i ] = pmat [-i*nao-j ]; pbuf1[j*nao+i ] =-pmat [-i*nao-j-1]; pbuf [j*nao+i+1] =-pmat1[-i*nao-j ]; pbuf1[j*nao+i+1] = pmat1[-i*nao-j-1]; } } EndTimeRevLoop(i, j); } else { BeginTimeRevLoop(i, j); pbuf = mat + j0 * nao + i0; pbuf1 = pbuf + nao; pmat = mat + (i1-1)*nao + (j1-1); pmat1 = pmat - nao; for (j = 0; j < j1-j0; j+=2) { for (i = 0; i < i1-i0; i+=2) { pbuf [j*nao+i ] =-pmat [-i*nao-j ]; pbuf1[j*nao+i ] = pmat [-i*nao-j-1]; pbuf [j*nao+i+1] = pmat1[-i*nao-j ]; pbuf1[j*nao+i+1] =-pmat1[-i*nao-j-1]; } } EndTimeRevLoop(i, j); } } } } static void atimerev_mat(double complex *mat, int *tao, int *ao_loc, int nbas) { int nao = ao_loc[nbas]; int ish, jsh, istart, iend, jstart, jend; int i, j, i0, j0, i1, j1; double complex *pmat, *pmat1, *pbuf, *pbuf1; for (ish = 0; ish < nbas; ish++) { for (jsh = 0; jsh < ish; jsh++) { istart = ao_loc[ish ]; iend = ao_loc[ish+1]; jstart = ao_loc[jsh ]; jend = ao_loc[jsh+1]; if ((tao[jstart]<0) == (tao[istart]<0)) { BeginTimeRevLoop(i, j); pbuf = mat + j0 * nao + i0; pbuf1 = pbuf + nao; pmat = mat + (i1-1)*nao + (j1-1); pmat1 = pmat - nao; for (j = 0; j < j1-j0; j+=2) { for (i = 0; i < i1-i0; i+=2) { pbuf [j*nao+i ] =-pmat [-i*nao-j ]; pbuf1[j*nao+i ] = pmat [-i*nao-j-1]; pbuf [j*nao+i+1] = pmat1[-i*nao-j ]; pbuf1[j*nao+i+1] =-pmat1[-i*nao-j-1]; } } EndTimeRevLoop(i, j); } else { BeginTimeRevLoop(i, j); pbuf = mat + j0 * nao + i0; pbuf1 = pbuf + nao; pmat = mat + (i1-1)*nao + (j1-1); pmat1 = pmat - nao; for (j = 0; j < j1-j0; j+=2) { for (i = 0; i < i1-i0; i+=2) { pbuf [j*nao+i ] = pmat [-i*nao-j ]; pbuf1[j*nao+i ] =-pmat [-i*nao-j-1]; pbuf [j*nao+i+1] =-pmat1[-i*nao-j ]; pbuf1[j*nao+i+1] = pmat1[-i*nao-j-1]; } } EndTimeRevLoop(i, j); } } } } static void copy_mat(double complex *buf, double complex *mat, int *ao_loc, int nbas) { int nao = ao_loc[nbas]; int ish, istart, iend, i, j; for (ish = 0; ish < nbas; ish++) { istart = ao_loc[ish 
]; iend = ao_loc[ish+1]; for (i = istart; i < iend; i++) { for (j = 0; j < iend; j++) { buf[i*nao+j] = mat[i*nao+j]; } } } } /* * ************************************************ * s1, s2ij, s2kl, s4 here to label the AO symmetry */ void AO2MOtranse1_r_s1(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1); size_t nao2 = envs->nao * envs->nao; (*fmmm)(vout+ij_pair*row_id, vin+nao2*row_id, envs, 0); } void AO2MOtranse1_r_s2ij(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { int nao = envs->nao; size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1); size_t nao2 = nao * nao; double complex *buf = malloc(sizeof(double complex) * nao*nao); copy_mat(buf, vin+nao2*row_id, envs->ao_loc, envs->nbas); timerev_mat(buf, envs->tao, envs->ao_loc, envs->nbas); (*fmmm)(vout+ij_pair*row_id, buf, envs, 0); free(buf); } void AO2MOtranse1_r_s2kl(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOtranse1_r_s1(fmmm, vout, vin, row_id, envs); } void AO2MOtranse1_r_s4(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOtranse1_r_s2ij(fmmm, vout, vin, row_id, envs); } void AO2MOtranse1_r_a2ij(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { int nao = envs->nao; size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1); size_t nao2 = nao * nao; double complex *buf = malloc(sizeof(double complex) * nao*nao); copy_mat(buf, vin+nao2*row_id, envs->ao_loc, envs->nbas); atimerev_mat(buf, envs->tao, envs->ao_loc, envs->nbas); (*fmmm)(vout+ij_pair*row_id, buf, envs, 0); free(buf); } void AO2MOtranse1_r_a2kl(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOtranse1_r_s1(fmmm, vout, vin, row_id, envs); } // anti-time-reversal between ij and time-reversal between kl void 
AO2MOtranse1_r_a4ij(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOtranse1_r_a2ij(fmmm, vout, vin, row_id, envs); } // time-reversal between ij and anti-time-reversal between kl void AO2MOtranse1_r_a4kl(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOtranse1_r_s2ij(fmmm, vout, vin, row_id, envs); } // anti-time-reversal between ij and anti-time-reversal between kl void AO2MOtranse1_r_a4(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOtranse1_r_a2ij(fmmm, vout, vin, row_id, envs); } void AO2MOtranse2_r_s1(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOtranse1_r_s1(fmmm, vout, vin, row_id, envs); } /* * ************************************************ * sort (shell-based) integral blocks then transform */ void AO2MOsortranse2_r_s1(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { int nao = envs->nao; int *ao_loc = envs->ao_loc; size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1); size_t nao2 = envs->nao * envs->nao; int ish, jsh, di, dj; int i, j; double complex *buf = malloc(sizeof(double complex) * nao2); double complex *pbuf; vin += nao2 * row_id; for (ish = 0; ish < envs->nbas; ish++) { di = ao_loc[ish+1] - ao_loc[ish]; for (jsh = 0; jsh < envs->nbas; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pbuf[i*nao+j] = vin[i*dj+j]; } } vin += di * dj; } } (*fmmm)(vout+ij_pair*row_id, buf, envs, 0); free(buf); } void AO2MOsortranse2_r_s2ij(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOsortranse2_r_s1(fmmm, vout, vin, row_id, envs); } void AO2MOsortranse2_r_s2kl(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { int nao = 
envs->nao; int *ao_loc = envs->ao_loc; size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1); size_t nao2 = 0; int ish, jsh, di, dj; int i, j; double complex *buf = malloc(sizeof(double complex) * nao * nao); double complex *pbuf; nao2 = nao * (nao+1) / 2; for (ish = 0; ish < envs->nbas; ish++) { di = ao_loc[ish+1] - ao_loc[ish]; nao2 += di * (di-1) / 2; // upper triangle for diagonal shells } vin += nao2 * row_id; for (ish = 0; ish < envs->nbas; ish++) { di = ao_loc[ish+1] - ao_loc[ish]; for (jsh = 0; jsh <= ish; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pbuf[i*nao+j] = vin[i*dj+j]; } } vin += di * dj; } } timerev_mat(buf, envs->tao, envs->ao_loc, envs->nbas); (*fmmm)(vout+ij_pair*row_id, buf, envs, 0); free(buf); } void AO2MOsortranse2_r_s4(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOsortranse2_r_s2kl(fmmm, vout, vin, row_id, envs); } void AO2MOsortranse2_r_a2ij(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOsortranse2_r_s1(fmmm, vout, vin, row_id, envs); } void AO2MOsortranse2_r_a2kl(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { int nao = envs->nao; int *ao_loc = envs->ao_loc; size_t ij_pair = (*fmmm)(NULL, NULL, envs, 1); size_t nao2 = 0; int ish, jsh, di, dj; int i, j; double complex *buf = malloc(sizeof(double complex) * nao * nao); double complex *pbuf; nao2 = nao * (nao+1) / 2; for (ish = 0; ish < envs->nbas; ish++) { di = ao_loc[ish+1] - ao_loc[ish]; nao2 += di * (di-1) / 2; // upper triangle for diagonal shells } vin += nao2 * row_id; for (ish = 0; ish < envs->nbas; ish++) { di = ao_loc[ish+1] - ao_loc[ish]; for (jsh = 0; jsh <= ish; jsh++) { dj = ao_loc[jsh+1] - ao_loc[jsh]; pbuf = buf + ao_loc[ish] * nao + ao_loc[jsh]; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pbuf[i*nao+j] = vin[i*dj+j]; 
} } vin += di * dj; } } atimerev_mat(buf, envs->tao, envs->ao_loc, envs->nbas); (*fmmm)(vout+ij_pair*row_id, buf, envs, 0); free(buf); } // anti-time-reversal between ij and time-reversal between kl void AO2MOsortranse2_r_a4ij(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOsortranse2_r_s2kl(fmmm, vout, vin, row_id, envs); } // time-reversal between ij and anti-time-reversal between kl void AO2MOsortranse2_r_a4kl(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOsortranse2_r_a2kl(fmmm, vout, vin, row_id, envs); } // anti-time-reversal between ij and anti-time-reversal between kl void AO2MOsortranse2_r_a4(int (*fmmm)(), double complex *vout, double complex *vin, int row_id, struct _AO2MOEnvs *envs) { AO2MOsortranse2_r_a2kl(fmmm, vout, vin, row_id, envs); } /* * Kramers pair should not be assumed */ void AO2MOr_e1_drv(int (*intor)(), void (*fill)(), void (*ftrans)(), int (*fmmm)(), double complex *eri, double complex *mo_coeff, int klsh_start, int klsh_count, int nkl, int ncomp, int *orbs_slice, int *tao, int *ao_loc, CINTOpt *cintopt, CVHFOpt *vhfopt, int *atm, int natm, int *bas, int nbas, double *env) { const int i_start = orbs_slice[0]; const int i_count = orbs_slice[1] - orbs_slice[0]; const int j_start = orbs_slice[2]; const int j_count = orbs_slice[3] - orbs_slice[2]; int nao = ao_loc[nbas]; int nmo = MAX(orbs_slice[1], orbs_slice[3]); int i; double *mo_r = malloc(sizeof(double) * nao * nmo); double *mo_i = malloc(sizeof(double) * nao * nmo); for (i = 0; i < nao*nmo; i++) { mo_r[i] = creal(mo_coeff[i]); mo_i[i] = cimag(mo_coeff[i]); } struct _AO2MOEnvs envs = {natm, nbas, atm, bas, env, nao, klsh_start, klsh_count, i_start, i_count, j_start, j_count, ncomp, tao, ao_loc, mo_coeff, mo_r, mo_i, cintopt, vhfopt}; double complex *eri_ao = malloc(sizeof(double complex) * nao*nao*nkl*ncomp); assert(eri_ao); int ish, kl; int (*fprescreen)(); if (vhfopt) { 
fprescreen = vhfopt->fprescreen; } else { fprescreen = CVHFnoscreen; } #pragma omp parallel default(none) \ shared(fill, fprescreen, eri_ao, envs, intor, nkl, nbas) \ private(ish) #pragma omp for nowait schedule(dynamic) for (ish = 0; ish < nbas; ish++) { (*fill)(intor, fprescreen, eri_ao, nkl, ish, &envs, 0); } #pragma omp parallel default(none) \ shared(ftrans, fmmm, eri, eri_ao, nkl, ncomp, envs) \ private(kl) #pragma omp for nowait schedule(static) for (kl = 0; kl < nkl*ncomp; kl++) { (*ftrans)(fmmm, eri, eri_ao, kl, &envs); } free(eri_ao); free(mo_r); free(mo_i); } void AO2MOr_e2_drv(void (*ftrans)(), int (*fmmm)(), double complex *vout, double complex *vin, double complex *mo_coeff, int nijcount, int nao, int *orbs_slice, int *tao, int *ao_loc, int nbas) { int nmo = MAX(orbs_slice[1], orbs_slice[3]); int i; double *mo_r = malloc(sizeof(double) * nao * nmo); double *mo_i = malloc(sizeof(double) * nao * nmo); for (i = 0; i < nao*nmo; i++) { mo_r[i] = creal(mo_coeff[i]); mo_i[i] = cimag(mo_coeff[i]); } struct _AO2MOEnvs envs; envs.bra_start = orbs_slice[0]; envs.bra_count = orbs_slice[1] - orbs_slice[0]; envs.ket_start = orbs_slice[2]; envs.ket_count = orbs_slice[3] - orbs_slice[2]; envs.nao = nao; envs.nbas = nbas; envs.tao = tao; envs.ao_loc = ao_loc; envs.mo_coeff = mo_coeff; envs.mo_r = mo_r; envs.mo_i = mo_i; #pragma omp parallel default(none) \ shared(ftrans, fmmm, vout, vin, nijcount, envs) \ private(i) #pragma omp for nowait schedule(static) for (i = 0; i < nijcount; i++) { (*ftrans)(fmmm, vout, vin, i, &envs); } free(mo_r); free(mo_i); }
/* ======== file: toimg.c (concatenation boundary) ======== */
/* Copyright 2013-2015 The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013, 2015 Martin Uecker <uecker@eecs.berkeley.edu> * 2015 Jonathan Tamir <jtamir@eecs.berkeley.edu> */ #include <stdlib.h> #include <assert.h> #include <stdio.h> #include <stdint.h> #include <strings.h> #include <complex.h> #include <stdbool.h> #include "num/multind.h" #include "num/init.h" #include "misc/misc.h" #include "misc/debug.h" #include "misc/mmio.h" #include "misc/png.h" #include "misc/dicom.h" #ifndef DIMS #define DIMS 16 #endif #ifndef CFL_SIZE #define CFL_SIZE sizeof(complex float) #endif static const char usage_str[] = "[-h] <input> <output_prefix>"; static const char help_str[] = "Create magnitude images as png or proto-dicom.\n" "The first two non-singleton dimensions will\n" "be used for the image, and the other dimensions\n" "will be looped over.\n"; static void toimg(bool dicom, const char* name, long inum, float max, long h, long w, const complex float* data) { int len = strlen(name); assert(len >= 1); int nr_bytes = dicom ? 2 : 3; unsigned char (*buf)[h][w][nr_bytes] = TYPE_ALLOC(unsigned char[h][w][nr_bytes]); float max_val = dicom ? 65535. : 255.; for (int i = 0; i < h; i++) { for (int j = 0; j < w; j++) { unsigned int value = max_val * (cabsf(data[j * h + i]) / max); if (!dicom) { (*buf)[i][j][0] = value; (*buf)[i][j][1] = value; (*buf)[i][j][2] = value; } else { (*buf)[i][j][0] = (value >> 0) & 0xFF; (*buf)[i][j][2] = (value >> 8) & 0xFF; } } } (dicom ? dicom_write : png_write_rgb24)(name, w, h, inum, &(*buf)[0][0][0]); free(buf); } static void toimg_stack(const char* name, bool dicom, const long dims[DIMS], const complex float* data) { long data_size = md_calc_size(DIMS, dims); long sq_dims[DIMS] = { [0 ... 
DIMS - 1] = 1 }; int l = 0; for (int i = 0; i < DIMS; i++) if (1 != dims[i]) sq_dims[l++] = dims[i]; float max = 0.; for (long i = 0; i < data_size; i++) max = MAX(cabsf(data[i]), max); if (0. == max) max = 1.; int len = strlen(name); assert(len >= 1); long num_imgs = md_calc_size(DIMS - 2, sq_dims + 2); long img_size = md_calc_size(2, sq_dims); debug_printf(DP_INFO, "Writing %d image(s)...", num_imgs); #pragma omp parallel for for (long i = 0; i < num_imgs; i++) { char name_i[len + 10]; // extra space for ".0000.png" if (num_imgs > 1) sprintf(name_i, "%s-%04ld.%s", name, i, dicom ? "dcm" : "png"); else sprintf(name_i, "%s.%s", name, dicom ? "dcm" : "png"); toimg(dicom, name_i, i, max, sq_dims[0], sq_dims[1], data + i * img_size); } debug_printf(DP_INFO, "done.\n", num_imgs); } int main_toimg(int argc, char* argv[]) { bool dicom = mini_cmdline_bool(argc, argv, 'd', 2, usage_str, help_str); num_init(); // -d option is deprecated char* ext = rindex(argv[2], '.'); if (NULL != ext) { assert(!dicom); if (0 == strcmp(ext, ".dcm")) dicom = true; else if (0 != strcmp(ext, ".png")) error("Unknown file extension."); *ext = '\0'; } long dims[DIMS]; complex float* data = load_cfl(argv[1], DIMS, dims); toimg_stack(argv[2], dicom, dims, data); unmap_cfl(DIMS, dims, data); exit(0); }
/* ======== file: parallel_for_simd_misc_messages.c (concatenation boundary) ======== */
/*
 * NOTE(review): this is a clang '-verify' diagnostic test for
 * '#pragma omp parallel for simd' (RUN lines at the top drive it).  The
 * 'expected-error@+N' / 'expected-note@+N' comments are line-offset-
 * relative test directives, so the file MUST NOT be reflowed: the original
 * line structure has already been lost by line-collapsing (each physical
 * line below is many logical lines, and everything after the first '//'
 * on a line is comment), and any further reformatting would silently
 * change which line each @+N directive refers to.  Kept byte-identical;
 * restore the upstream layout from version control rather than editing
 * here.  The final function (test_linear) is truncated by the chunk
 * boundary and is likewise left untouched.
 */
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd foo void test_no_clause() { int i; #pragma omp parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}} #pragma omp parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // 
expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for 
simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, ) for (i = 0; i < 
16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd 
simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 
{{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel #pragma omp parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp 
parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp 
parallel for simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp parallel for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp parallel for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp parallel for simd linear(x) 
private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp parallel for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp parallel for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp parallel for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // 
expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 
{{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // 
expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], 
c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp parallel for simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp parallel for simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{expected variable name}} #pragma omp parallel for simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp 
parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp parallel for simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp parallel for simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP 
clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp parallel for simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected '(' after 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(concurrent) // omp45-error {{unexpected OpenMP 
clause 'order' in directive '#pragma omp parallel for simd'}} for (int i = 0; i < 10; ++i) ; }
rose_scalar_anti.c
/* * Scalar-to-scalar dependencies * */ #include <stdio.h> #include "omp.h" int a[100]; #if 1 void foo2() { int i; int tmp; tmp = 10; // It would be wrong to parallelize the following loop // since the true dependence between tmp in an iteration // and tmp in the following iteration. // Even firstprivate cannot help this. for (i = 0; i <= 99; i += 1) { a[i] = tmp; tmp = a[i] + i; } printf("a[0]=%d\n",a[0]); printf("a[20]=%d\n",a[20]); printf("a[40]=%d\n",a[40]); printf("a[60]=%d\n",a[60]); printf("a[80]=%d\n",a[80]); printf("a[99]=%d\n",a[99]); } #endif void foo() { int i; int tmp; tmp = 10; // This should be parallelized using firstprivate #pragma omp parallel for private (i) for (i = 0; i <= 99; i += 1) { a[i] = tmp; } i = tmp; } int main() { foo2(); return 0; }
GB_binop__lt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lt_int32 // A.*B function (eWiseMult): GB_AemultB__lt_int32 // A*D function (colscale): GB_AxD__lt_int32 // D*A function (rowscale): GB_DxB__lt_int32 // C+=B function (dense accum): GB_Cdense_accumB__lt_int32 // C+=b function (dense accum): GB_Cdense_accumb__lt_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_int32 // C=scalar+B GB_bind1st__lt_int32 // C=scalar+B' GB_bind1st_tran__lt_int32 // C=A+scalar GB_bind2nd__lt_int32 // C=A'+scalar GB_bind2nd_tran__lt_int32 // C type: bool // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ 
cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_INT32 || GxB_NO_LT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lt_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lt_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lt_int32 ( GrB_Matrix C, const 
GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lt_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lt_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lt_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lt_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lt_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__lt_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__lt_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* NOTE(review): auto-generated SuiteSparse:GraphBLAS kernel file for the
   LT_INT32 binary operator (header above says "do not edit it (auto-generated)").
   This copy has also been reflowed onto a few very long physical lines, which
   breaks the '//' comments and backslash macro continuations; regenerate it
   from the GraphBLAS code generator rather than editing by hand. */
/* ==== polysa_t2s.c ==== */
#include <limits.h>
#include <stdio.h>
#include <string.h>

#include <isl/aff.h>
#include <isl/ctx.h>
#include <isl/flow.h>
#include <isl/map.h>
#include <isl/vec.h>
#include <isl/ast_build.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <isl/constraint.h>
#include <isl/id_to_id.h>
#include <pet.h>
#include <pet/expr.h>

#include "ppcg.h"
#include "ppcg_options.h"
#include "print.h"
#include "schedule.h"
#include "util.h"
#include "polysa_t2s.h"

/* A generated T2S statement: its printable text. */
struct t2s_stmt {
	char *content;
};

/* Free a t2s_array_ref_group and its owned resources.
 * The refs array is only owned (and freed) when it holds more than one
 * reference.  Always returns NULL so callers can reset their pointer.
 */
static struct t2s_array_ref_group *t2s_array_ref_group_free(
	struct t2s_array_ref_group *group )
{
	if (!group)
		return NULL;
	isl_map_free(group->access);
	if (group->n_ref > 1)
		free(group->refs);
	free(group);
	return NULL;
}

/* Free a t2s_group_data structure; returns NULL. */
static struct t2s_group_data *t2s_group_data_free(struct t2s_group_data *d)
{
	if (!d)
		return NULL;
	isl_union_map_free(d->full_sched);
	free(d);
	return NULL;
}

/* A statement together with its T2S counterpart. */
struct polysa_stmt {
	struct ppcg_stmt *stmt;
	/* T2S */
	struct t2s_stmt *t_stmt;
};

/* Representation of a statement inside a generated AST.
 *
 * "stmt" refers to the original statement.
 * "ref2expr" maps the reference identifier of each access in
 * the statement to an AST expression that should be printed
 * at the place of the access.
 */
struct ppcg_stmt {
	struct pet_stmt *stmt;

	isl_id_to_ast_expr *ref2expr;
};

/* Free a ppcg_stmt (isl_id free-callback signature). */
static void ppcg_stmt_free(void *user)
{
	struct ppcg_stmt *stmt = user;

	if (!stmt)
		return;

	isl_id_to_ast_expr_free(stmt->ref2expr);

	free(stmt);
}

/* Free a t2s_stmt (isl_id free-callback signature).
 * NOTE(review): stmt->content is not freed here — confirm ownership
 * with the allocation site. */
static void t2s_stmt_free(void *user)
{
	struct t2s_stmt *stmt = user;

	if (!stmt)
		return;

	free(stmt);
}

/* Free a polysa_stmt together with both embedded statements. */
static void polysa_stmt_free(void *user)
{
	struct polysa_stmt *p_stmt = user;

	if (!p_stmt)
		return;

	ppcg_stmt_free(p_stmt->stmt);
	t2s_stmt_free(p_stmt->t_stmt);

	free(p_stmt);
}

/* Mark if the schedule at each depth is a sequential node or not.
*/
static isl_bool update_seq_band(__isl_keep isl_schedule_node *node, void *user)
{
	enum isl_schedule_node_type node_type = isl_schedule_node_get_type(node);
	enum isl_schedule_node_type *type_depth = user;
	int total_band_depth = isl_schedule_node_get_schedule_depth(node);
	int total_seq_depth = 0;

	/* Count sequence nodes on the path to the root; together with the
	 * band depth they give this node's slot in type_depth[]. */
	isl_schedule_node *node_tmp = isl_schedule_node_copy(node);
	while (isl_schedule_node_has_parent(node_tmp)) {
		node_tmp = isl_schedule_node_parent(node_tmp);
		if (isl_schedule_node_get_type(node_tmp) == isl_schedule_node_sequence)
			total_seq_depth += 1;
	}
	isl_schedule_node_free(node_tmp);

	int cur_depth = total_band_depth + total_seq_depth;
	if (node_type == isl_schedule_node_band) {
		/* A band occupies one slot per band member. */
		for (int i = 0; i < isl_schedule_node_band_n_member(node); i++) {
			type_depth[cur_depth + i] = node_type;
		}
	} else if (node_type == isl_schedule_node_sequence) {
		type_depth[cur_depth + 0] = node_type;
	}

	return isl_bool_true;
}

/* Peel off the iterators for scalar dimensions in a vector.
 * Builds a new vector that keeps only the elements whose schedule depth
 * is not occupied by a sequence node; consumes "vec".
 */
static __isl_give isl_vec *t2s_peel_off_scalar_dims_vec(__isl_take isl_vec *vec,
	__isl_keep isl_schedule *schedule)
{
	isl_schedule_node *root = isl_schedule_get_root(schedule);
	isl_union_map *full_sched =
		isl_schedule_node_get_subtree_schedule_union_map(root);
	isl_set *sched_range =
		isl_set_from_union_set(isl_union_map_range(full_sched));
	int sched_depth = isl_set_dim(sched_range, isl_dim_set);
	isl_set_free(sched_range);
	isl_schedule_node_free(root);

	isl_ctx *ctx = isl_vec_get_ctx(vec);
	enum isl_schedule_node_type *type_depth =
		isl_calloc_array(isl_schedule_get_ctx(schedule),
			enum isl_schedule_node_type, sched_depth);
	/* -1 marks "no band/sequence recorded at this depth". */
	for (int i = 0; i < sched_depth; i++) {
		type_depth[i] = -1;
	}
	isl_schedule_foreach_schedule_node_top_down(schedule, &update_seq_band,
		type_depth);

	/* Concatenate the surviving (non-sequence) elements one by one. */
	isl_vec *new_vec = isl_vec_alloc(isl_vec_get_ctx(vec), 0);
	for (int i = 0; i < sched_depth; i++) {
		if (type_depth[i] != isl_schedule_node_sequence) {
			isl_vec *vec_i = isl_vec_alloc(ctx, 1);
			vec_i = isl_vec_set_element_val(vec_i, 0,
				isl_vec_get_element_val(vec, i));
			new_vec = isl_vec_concat(new_vec, vec_i);
		}
	}

	free(type_depth);
	isl_vec_free(vec);

	return new_vec;
}

/* Peel off the iterators for scalar dimensions in the iteration domain "set".
 * Projects out every set dimension whose schedule depth corresponds to a
 * sequence node; "proj_dim" compensates for the dimensions already removed.
 */
static __isl_give isl_set *t2s_peel_off_scalar_dims(__isl_take isl_set *set,
	__isl_keep isl_schedule *schedule)
{
	isl_schedule_node *root = isl_schedule_get_root(schedule);
	isl_union_map *full_sched =
		isl_schedule_node_get_subtree_schedule_union_map(root);
	isl_set *sched_range =
		isl_set_from_union_set(isl_union_map_range(full_sched));
	int sched_depth = isl_set_dim(sched_range, isl_dim_set);
	isl_set_free(sched_range);
	isl_schedule_node_free(root);

	enum isl_schedule_node_type *type_depth =
		isl_calloc_array(isl_schedule_get_ctx(schedule),
			enum isl_schedule_node_type, sched_depth);
	for (int i = 0; i < sched_depth; i++) {
		type_depth[i] = -1;
	}
	isl_schedule_foreach_schedule_node_top_down(schedule, &update_seq_band,
		type_depth);

	int proj_dim = 0;
	for (int i = 0; i < sched_depth; i++) {
		if (type_depth[i] == isl_schedule_node_sequence) {
			set = isl_set_project_out(set, isl_dim_set, i - proj_dim, 1);
			proj_dim++;
		}
	}

	free(type_depth);

	return set;
}

/* Derive the output file name from the input file name.
 * 'input' is the entire path of the input file. The output
 * is the file name plus the additional extension.
 *
 * We will basically replace everything after the last point
 * with '.polysa.c'. This means file.c becomes file.polysa.c
 */
static FILE *get_output_file(const char *input, const char *output)
{
	char name[PATH_MAX];
	const char *ext;
	const char ppcg_marker[] = ".polysa";
	int len;
	FILE *file;

	len = ppcg_extract_base_name(name, input);

	strcpy(name + len, ppcg_marker);
	ext = strrchr(input, '.');
	/* Keep the original extension; default to ".c" when there is none. */
	strcpy(name + len + sizeof(ppcg_marker) - 1, ext ? ext : ".c");

	if (!output)
		output = name;

	file = fopen(output, "w");
	if (!file) {
		fprintf(stderr, "Unable to open '%s' for writing\n", output);
		return NULL;
	}

	return file;
}

/* Data used to annotate for nodes in the ast. */
struct ast_node_userinfo {
	/* The for node is an openmp parallel for node. */
	int is_openmp;
};

/* Information used while building the ast. */
struct ast_build_userinfo {
	/* The current ppcg scop. */
	struct ppcg_scop *scop;

	/* Are we currently in a parallel for loop? */
	int in_parallel_for;
};

/* Check if the current scheduling dimension is parallel.
 *
 * We check for parallelism by verifying that the loop does not carry any
 * dependences.
 * If the live_range_reordering option is set, then this currently
 * includes the order dependences.  In principle, non-zero order dependences
 * could be allowed, but this would require privatization and/or expansion.
 *
 * Parallelism test: if the distance is zero in all outer dimensions, then it
 * has to be zero in the current dimension as well.
 * Implementation: first, translate dependences into time space, then force
 * outer dimensions to be equal.  If the distance is zero in the current
 * dimension, then the loop is parallel.
 * The distance is zero in the current dimension if it is a subset of a map
 * with equal values for the current dimension.
 */
static int ast_schedule_dim_is_parallel(__isl_keep isl_ast_build *build,
	struct ppcg_scop *scop)
{
	isl_union_map *schedule, *deps;
	isl_map *schedule_deps, *test;
	isl_space *schedule_space;
	unsigned i, dimension, is_parallel;

	schedule = isl_ast_build_get_schedule(build);
	schedule_space = isl_ast_build_get_schedule_space(build);

	/* The dimension currently being generated is the innermost one. */
	dimension = isl_space_dim(schedule_space, isl_dim_out) - 1;

	deps = isl_union_map_copy(scop->dep_flow);
	deps = isl_union_map_union(deps, isl_union_map_copy(scop->dep_false));
	if (scop->options->live_range_reordering) {
		isl_union_map *order = isl_union_map_copy(scop->dep_order);
		deps = isl_union_map_union(deps, order);
	}
	/* Translate the dependences into the schedule (time) space. */
	deps = isl_union_map_apply_range(deps, isl_union_map_copy(schedule));
	deps = isl_union_map_apply_domain(deps, schedule);

	if (isl_union_map_is_empty(deps)) {
		isl_union_map_free(deps);
		isl_space_free(schedule_space);
		return 1;
	}

	schedule_deps = isl_map_from_union_map(deps);

	/* Force the outer dimensions to be equal. */
	for (i = 0; i < dimension; i++)
		schedule_deps = isl_map_equate(schedule_deps, isl_dim_out, i,
			isl_dim_in, i);

	/* Parallel iff all remaining dependences have distance zero
	 * in the current dimension. */
	test = isl_map_universe(isl_map_get_space(schedule_deps));
	test = isl_map_equate(test, isl_dim_out, dimension, isl_dim_in, dimension);
	is_parallel = isl_map_is_subset(schedule_deps, test);

	isl_space_free(schedule_space);
	isl_map_free(test);
	isl_map_free(schedule_deps);

	return is_parallel;
}

/* Mark a for node openmp parallel, if it is the outermost parallel for node.
 */
static void mark_openmp_parallel(__isl_keep isl_ast_build *build,
	struct ast_build_userinfo *build_info,
	struct ast_node_userinfo *node_info)
{
	/* Only the outermost parallel loop gets the pragma. */
	if (build_info->in_parallel_for)
		return;

	if (ast_schedule_dim_is_parallel(build, build_info->scop)) {
		build_info->in_parallel_for = 1;
		node_info->is_openmp = 1;
	}
}

/* Allocate an ast_node_info structure and initialize it with default values.
*/ static struct ast_node_userinfo *allocate_ast_node_userinfo() { struct ast_node_userinfo *node_info; node_info = (struct ast_node_userinfo *) malloc(sizeof(struct ast_node_userinfo)); node_info->is_openmp = 0; return node_info; } /* Free an ast_node_info structure. */ static void free_ast_node_userinfo(void *ptr) { struct ast_node_userinfo *info; info = (struct ast_node_userinfo *) ptr; free(info); } /* This method is executed before the construction of a for node. It creates * an isl_id that is used to annotate the subsequently generated ast for nodes. * * In this function we also run the following analyses: * * - Detection of openmp parallel loops */ static __isl_give isl_id *ast_build_before_for( __isl_keep isl_ast_build *build, void *user) { isl_id *id; struct ast_build_userinfo *build_info; struct ast_node_userinfo *node_info; build_info = (struct ast_build_userinfo *) user; node_info = allocate_ast_node_userinfo(); id = isl_id_alloc(isl_ast_build_get_ctx(build), "", node_info); id = isl_id_set_free_user(id, free_ast_node_userinfo); mark_openmp_parallel(build, build_info, node_info); return id; } /* This method is executed after the construction of a for node. * * It performs the following actions: * * - Reset the 'in_parallel_for' flag, as soon as we leave a for node, * that is marked as openmp parallel. * */ static __isl_give isl_ast_node *ast_build_after_for( __isl_take isl_ast_node *node, __isl_keep isl_ast_build *build, void *user) { isl_id *id; struct ast_build_userinfo *build_info; struct ast_node_userinfo *info; id = isl_ast_node_get_annotation(node); info = isl_id_get_user(id); if (info && info->is_openmp) { build_info = (struct ast_build_userinfo *) user; build_info->in_parallel_for = 0; } isl_id_free(id); return node; } /* Find the element in scop->stmts that has the given "id". 
 */
static struct pet_stmt *find_stmt(struct ppcg_scop *scop, __isl_keep isl_id *id)
{
	int i;

	for (i = 0; i < scop->pet->n_stmt; ++i) {
		struct pet_stmt *stmt = scop->pet->stmts[i];
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(stmt->domain);
		/* Drop the reference before the pointer comparison;
		 * NOTE(review): this relies on stmt->domain keeping the id
		 * alive — the usual isl idiom, but confirm. */
		isl_id_free(id_i);

		if (id_i == id)
			return stmt;
	}

	isl_die(isl_id_get_ctx(id), isl_error_internal,
		"statement not found", return NULL);
}

/* Print a user statement in the generated AST.
 * The ppcg_stmt has been attached to the node in at_each_domain.
 */
static __isl_give isl_printer *print_user(__isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options,
	__isl_keep isl_ast_node *node, void *user)
{
	struct ppcg_stmt *stmt;
	isl_id *id;

	id = isl_ast_node_get_annotation(node);
	stmt = isl_id_get_user(id);
	isl_id_free(id);

	p = pet_stmt_print_body(stmt->stmt, p, stmt->ref2expr);

	isl_ast_print_options_free(print_options);

	return p;
}

/* Print a for loop node as an openmp parallel loop.
 *
 * To print an openmp parallel loop we print a normal for loop, but add
 * "#pragma openmp parallel for" in front.
 *
 * Variables that are declared within the body of this for loop are
 * automatically openmp 'private'. Iterators declared outside of the
 * for loop are automatically openmp 'shared'. As ppcg declares all iterators
 * at the position where they are assigned, there is no need to explicitly mark
 * variables. Their automatically assigned type is already correct.
 *
 * This function only generates valid OpenMP code, if the ast was generated
 * with the 'atomic-bounds' option enabled.
 *
 */
static __isl_give isl_printer *print_for_with_openmp(
	__isl_keep isl_ast_node *node, __isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options)
{
	p = isl_printer_start_line(p);
	p = isl_printer_print_str(p, "#pragma omp parallel for");
	p = isl_printer_end_line(p);

	p = isl_ast_node_for_print(node, p, print_options);

	return p;
}

/* Print a for node.
*
 * Depending on how the node is annotated, we either print a normal
 * for node or an openmp parallel for node.
 */
static __isl_give isl_printer *print_for(__isl_take isl_printer *p,
	__isl_take isl_ast_print_options *print_options,
	__isl_keep isl_ast_node *node, void *user)
{
	isl_id *id;
	int openmp;

	openmp = 0;
	id = isl_ast_node_get_annotation(node);

	if (id) {
		struct ast_node_userinfo *info;

		info = (struct ast_node_userinfo *) isl_id_get_user(id);
		if (info && info->is_openmp)
			openmp = 1;
	}

	if (openmp)
		p = print_for_with_openmp(node, p, print_options);
	else
		p = isl_ast_node_for_print(node, p, print_options);

	isl_id_free(id);

	return p;
}

/* Index transformation callback for pet_stmt_build_ast_exprs.
 *
 * "index" expresses the array indices in terms of statement iterators
 * "iterator_map" expresses the statement iterators in terms of
 * AST loop iterators.
 *
 * The result expresses the array indices in terms of
 * AST loop iterators.
 */
static __isl_give isl_multi_pw_aff *pullback_index(
	__isl_take isl_multi_pw_aff *index, __isl_keep isl_id *id, void *user)
{
	isl_pw_multi_aff *iterator_map = user;

	iterator_map = isl_pw_multi_aff_copy(iterator_map);
	return isl_multi_pw_aff_pullback_pw_multi_aff(index, iterator_map);
}

/* Transform the accesses in the statement associated to the domain
 * called by "node" to refer to the AST loop iterators, construct
 * corresponding AST expressions using "build",
 * collect them in a ppcg_stmt and annotate the node with the ppcg_stmt.
 */
static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build, void *user)
{
	struct ppcg_scop *scop = user;
	isl_ast_expr *expr, *arg;
	isl_ctx *ctx;
	isl_id *id;
	isl_map *map;
	isl_pw_multi_aff *iterator_map;
	struct ppcg_stmt *stmt;

	ctx = isl_ast_node_get_ctx(node);
	stmt = isl_calloc_type(ctx, struct ppcg_stmt);
	if (!stmt)
		goto error;

	/* The user node's call expression: first argument is the statement id. */
	expr = isl_ast_node_user_get_expr(node);
	arg = isl_ast_expr_get_op_arg(expr, 0);
	isl_ast_expr_free(expr);
	id = isl_ast_expr_get_id(arg);
	isl_ast_expr_free(arg);
	stmt->stmt = find_stmt(scop, id);
	isl_id_free(id);
	if (!stmt->stmt)
		goto error;

	/* Map AST loop iterators back to statement iterators. */
	map = isl_map_from_union_map(isl_ast_build_get_schedule(build));
	map = isl_map_reverse(map);
	iterator_map = isl_pw_multi_aff_from_map(map);
	stmt->ref2expr = pet_stmt_build_ast_exprs(stmt->stmt, build,
				    &pullback_index, iterator_map, NULL, NULL);
	isl_pw_multi_aff_free(iterator_map);

	id = isl_id_alloc(isl_ast_node_get_ctx(node), NULL, stmt);
	id = isl_id_set_free_user(id, &ppcg_stmt_free);
	return isl_ast_node_set_annotation(node, id);
error:
	ppcg_stmt_free(stmt);
	return isl_ast_node_free(node);
}

/* Set *depth (initialized to 0 by the caller) to the maximum
 * of the schedule depths of the leaf nodes for which this function is called.
 */
static isl_bool update_depth(__isl_keep isl_schedule_node *node, void *user)
{
	int *depth = user;
	int node_depth;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
		return isl_bool_true;
	node_depth = isl_schedule_node_get_schedule_depth(node);
	if (node_depth > *depth)
		*depth = node_depth;

	/* Leaf reached: no need to descend further. */
	return isl_bool_false;
}

/* This function is called for each node in a CPU AST.
 * In case of a user node, print the macro definitions required
 * for printing the AST expressions in the annotation, if any.
 * For other nodes, return true such that descendants are also
 * visited.
 *
 * In particular, print the macro definitions needed for the substitutions
 * of the original user statements.
 */
static isl_bool at_node(__isl_keep isl_ast_node *node, void *user)
{
	struct ppcg_stmt *stmt;
	isl_id *id;
	isl_printer **p = user;

	if (isl_ast_node_get_type(node) != isl_ast_node_user)
		return isl_bool_true;

	id = isl_ast_node_get_annotation(node);
	stmt = isl_id_get_user(id);
	isl_id_free(id);

	if (!stmt)
		return isl_bool_error;

	*p = ppcg_print_body_macros(*p, stmt->ref2expr);
	if (!*p)
		return isl_bool_error;

	/* User node handled; no descendants to visit. */
	return isl_bool_false;
}

/* Print the required macros for the CPU AST "node" to "p",
 * including those needed for the user statements inside the AST.
 */
static __isl_give isl_printer *cpu_print_macros(__isl_take isl_printer *p,
	__isl_keep isl_ast_node *node)
{
	if (isl_ast_node_foreach_descendant_top_down(node, &at_node, &p) < 0)
		return isl_printer_free(p);
	p = ppcg_print_macros(p, node);
	return p;
}

/* Debug helper: report every user node encountered while walking the AST. */
static isl_bool debug_ast_node(__isl_keep isl_ast_node *node, void *user)
{
	enum isl_ast_node_type type = isl_ast_node_get_type(node);
	/* Only user nodes are of interest; other node types fall through. */
	switch (type) {
		case isl_ast_node_user:
			printf("user node found.\n");
			break;
	}

	return isl_bool_true;
}

/* Code generate the scop 'scop' using "schedule"
 * and print the corresponding C code to 'p'.
 */
static __isl_give isl_printer *print_scop(struct ppcg_scop *scop,
	__isl_take isl_schedule *schedule, __isl_take isl_printer *p,
	struct ppcg_options *options)
{
	isl_ctx *ctx = isl_printer_get_ctx(p);
	isl_ast_build *build;
	isl_ast_print_options *print_options;
	isl_ast_node *tree;
	isl_id_list *iterators;
	struct ast_build_userinfo build_info;
	int depth;

	/* Number of AST iterator names needed = maximum schedule depth. */
	depth = 0;
	if (isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth,
						&depth) < 0)
		goto error;

	build = isl_ast_build_alloc(ctx);
	iterators = ppcg_scop_generate_names(scop, depth, "c");
	build = isl_ast_build_set_iterators(build, iterators);
	build = isl_ast_build_set_at_each_domain(build, &at_each_domain, scop);

	if (options->openmp) {
		build_info.scop = scop;
		build_info.in_parallel_for = 0;

		build = isl_ast_build_set_before_each_for(build,
			&ast_build_before_for, &build_info);
		build = isl_ast_build_set_after_each_for(build,
			&ast_build_after_for, &build_info);
	}

	tree = isl_ast_build_node_from_schedule(build, schedule);
	isl_ast_build_free(build);

	print_options = isl_ast_print_options_alloc(ctx);
	print_options = isl_ast_print_options_set_print_user(print_options,
		&print_user, NULL);
	print_options = isl_ast_print_options_set_print_for(print_options,
		&print_for, NULL);

	p = cpu_print_macros(p, tree);
	p = isl_ast_node_print(tree, p, print_options);
	isl_ast_node_free(tree);

	return p;
error:
	isl_schedule_free(schedule);
	isl_printer_free(p);
	return NULL;
}

/* Tile the band node "node" with tile sizes "sizes" and
 * mark all members of the resulting tile node as "atomic".
 */
static __isl_give isl_schedule_node *tile(__isl_take isl_schedule_node *node,
	__isl_take isl_multi_val *sizes)
{
	node = isl_schedule_node_band_tile(node, sizes);
	node = ppcg_set_schedule_node_type(node, isl_ast_loop_atomic);

	return node;
}

/* Tile "node", if it is a band node with at least 2 members.
 * The tile sizes are set from the "tile_size" option.
 */
static __isl_give isl_schedule_node *tile_band(
	__isl_take isl_schedule_node *node, void *user)
{
	struct ppcg_scop *scop = user;
	int n;
	isl_space *space;
	isl_multi_val *sizes;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
		return node;

	/* Single-member bands are left untiled. */
	n = isl_schedule_node_band_n_member(node);
	if (n <= 1)
		return node;

	space = isl_schedule_node_band_get_space(node);
	sizes = ppcg_multi_val_from_int(space, scop->options->tile_size);

	return tile(node, sizes);
}

/* Construct schedule constraints from the dependences in ps
 * for the purpose of computing a schedule for a CPU.
 *
 * The proximity constraints are set to the flow dependences.
 *
 * If live-range reordering is allowed then the conditional validity
 * constraints are set to the order dependences with the flow dependences
 * as condition.  That is, a live-range (flow dependence) will be either
 * local to an iteration of a band or all adjacent order dependences
 * will be respected by the band.
 * The validity constraints are set to the union of the flow dependences
 * and the forced dependences, while the coincidence constraints
 * are set to the union of the flow dependences, the forced dependences and
 * the order dependences.
 *
 * If live-range reordering is not allowed, then both the validity
 * and the coincidence constraints are set to the union of the flow
 * dependences and the false dependences.
 *
 * Note that the coincidence constraints are only set when the "openmp"
 * options is set.  Even though the way openmp pragmas are introduced
 * does not rely on the coincident property of the schedule band members,
 * the coincidence constraints do affect the way the schedule is constructed,
 * such that more schedule dimensions should be detected as parallel
 * by ast_schedule_dim_is_parallel.
 * Since the order dependences are also taken into account by
 * ast_schedule_dim_is_parallel, they are also added to
 * the coincidence constraints.
If the openmp handling learns
 * how to privatize some memory, then the corresponding order
 * dependences can be removed from the coincidence constraints.
 */
static __isl_give isl_schedule_constraints *construct_cpu_schedule_constraints(
	struct ppcg_scop *ps)
{
	isl_schedule_constraints *sc;
	isl_union_map *validity, *coincidence;

	sc = isl_schedule_constraints_on_domain(isl_union_set_copy(ps->domain));
	if (ps->options->live_range_reordering) {
		sc = isl_schedule_constraints_set_conditional_validity(sc,
			isl_union_map_copy(ps->tagged_dep_flow),
			isl_union_map_copy(ps->tagged_dep_order));
		validity = isl_union_map_copy(ps->dep_flow);
		validity = isl_union_map_union(validity,
			isl_union_map_copy(ps->dep_forced));
		/* Coincidence is now computed unconditionally (openmp guard
		 * deliberately disabled below). */
//		if (ps->options->openmp) {
		coincidence = isl_union_map_copy(validity);
		coincidence = isl_union_map_union(coincidence,
			isl_union_map_copy(ps->dep_order));
//		}
		/* Add the RAR dependences into the validity constraints for
		 * systolic array generation. */
		if (ps->options->polysa) {
			validity = isl_union_map_union(validity,
				isl_union_map_copy(ps->dep_rar));
		}
	} else {
		validity = isl_union_map_copy(ps->dep_flow);
		validity = isl_union_map_union(validity,
			isl_union_map_copy(ps->dep_false));
//		if (ps->options->openmp)
		coincidence = isl_union_map_copy(validity);
		/* Add the RAR dependences into the validity constraints for
		 * systolic array generation. */
		if (ps->options->polysa) {
			validity = isl_union_map_union(validity,
				isl_union_map_copy(ps->dep_rar));
		}
	}
//	if (ps->options->openmp)
	sc = isl_schedule_constraints_set_coincidence(sc, coincidence);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc,
		isl_union_map_copy(ps->dep_flow));

	return sc;
}

/* Compute a schedule for the scop "ps".
 *
 * First derive the appropriate schedule constraints from the dependences
 * in "ps" and then compute a schedule from those schedule constraints,
 * possibly grouping statement instances based on the input schedule.
*/ static __isl_give isl_schedule *compute_cpu_schedule(struct ppcg_scop *ps) { isl_schedule_constraints *sc; isl_schedule *schedule; if (!ps) return NULL; sc = construct_cpu_schedule_constraints(ps); schedule = ppcg_compute_schedule(sc, ps->schedule, ps->options); return schedule; } /* Compute a new schedule to the scop "ps" if the reschedule option is set. * Otherwise, return a copy of the original schedule. */ static __isl_give isl_schedule *optionally_compute_schedule(void *user) { struct ppcg_scop *ps = user; if (!ps) return NULL; if (!ps->options->reschedule) return isl_schedule_copy(ps->schedule); return compute_cpu_schedule(ps); } /* Compute a schedule based on the dependences in "ps" and * tile it if requested by the user. */ static __isl_give isl_schedule *get_schedule(struct ppcg_scop *ps, struct ppcg_options *options) { isl_ctx *ctx; isl_schedule *schedule; if (!ps) return NULL; ctx = isl_union_set_get_ctx(ps->domain); schedule = ppcg_get_schedule(ctx, options, &optionally_compute_schedule, ps); if (ps->options->tile) schedule = isl_schedule_map_schedule_node_bottom_up(schedule, &tile_band, ps); return schedule; } /* Generate CPU code for the scop "ps" using "schedule" and * print the corresponding C code to "p", including variable declarations. 
 */
static __isl_give isl_printer *print_cpu_with_schedule(
	__isl_take isl_printer *p, struct ppcg_scop *ps,
	__isl_take isl_schedule *schedule, struct ppcg_options *options)
{
	int hidden;
	isl_set *context;

	p = isl_printer_start_line(p);
	p = isl_printer_print_str(p, "/* PPCG generated CPU code */");
	p = isl_printer_end_line(p);

	p = isl_printer_start_line(p);
	p = isl_printer_end_line(p);

	p = ppcg_set_macro_names(p);
	p = ppcg_print_exposed_declarations(p, ps);
	/* Hidden declarations get their own block scope. */
	hidden = ppcg_scop_any_hidden_declarations(ps);
	if (hidden) {
		p = ppcg_start_block(p);
		p = ppcg_print_hidden_declarations(p, ps);
	}

	context = isl_set_copy(ps->context);
	context = isl_set_from_params(context);
	schedule = isl_schedule_insert_context(schedule, context);
	if (options->debug->dump_final_schedule)
		isl_schedule_dump(schedule);
	p = print_scop(ps, schedule, p, options);
	if (hidden)
		p = ppcg_end_block(p);

	return p;
}

//static __isl_give isl_schedule_node *aggregate_stmt_domain(__isl_take isl_schedule_node *node, void *user)
//{
//  isl_union_set *domain;
//  isl_union_map *schedule;
//  isl_set *stmt_domain;
//  isl_set **anchor_domain = (isl_set **)(user);
//
//  if (!node)
//    return NULL;
//
//  if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
//    return node;
//
//  domain = isl_schedule_node_get_domain(node);
//  schedule = isl_schedule_node_get_prefix_schedule_union_map(node);
//  schedule = isl_union_map_intersect_domain(schedule, domain);
//  stmt_domain = isl_set_from_union_set(isl_union_map_range(schedule));
//  if (*anchor_domain == NULL)
//    *anchor_domain = isl_set_copy(stmt_domain);
//  else
//    *anchor_domain = isl_set_union(*anchor_domain, isl_set_copy(stmt_domain));
//
//  isl_set_free(stmt_domain);
//
//  return node;
//}

/* Extract the (simplified) iteration domain of each user statement.
*/ static isl_stat extract_each_stmt_domain(__isl_take isl_set *set, void *user) { struct t2s_data *data = user; isl_union_set *sched_domain = isl_union_set_apply(isl_union_set_from_set(isl_set_copy(set)), isl_union_map_copy(data->sched)); isl_set *stmt_domain_i = isl_set_from_union_set(sched_domain); isl_set *stmt_sim_domain_i = isl_set_gist(isl_set_copy(stmt_domain_i), isl_set_copy(data->anchor_domain)); /* Set the name of space. */ isl_space *space = isl_set_get_space(set); const char *stmt_name = isl_space_get_tuple_name(space, isl_dim_set); stmt_domain_i = isl_set_set_tuple_name(stmt_domain_i, stmt_name); stmt_sim_domain_i = isl_set_set_tuple_name(stmt_sim_domain_i, stmt_name); isl_set_free(set); isl_space_free(space); if (data->stmt_domain == NULL) data->stmt_domain = isl_union_set_from_set(stmt_domain_i); else data->stmt_domain = isl_union_set_union(data->stmt_domain, isl_union_set_from_set(stmt_domain_i)); if (data->stmt_sim_domain == NULL) data->stmt_sim_domain = isl_union_set_from_set(stmt_sim_domain_i); else data->stmt_sim_domain = isl_union_set_union(data->stmt_sim_domain, isl_union_set_from_set(stmt_sim_domain_i)); return isl_stat_ok; } /* Extract the (simplified) iteration domain of all the user statemets. */ static isl_stat extract_stmt_domain(__isl_keep isl_schedule *schedule, struct t2s_data *data) { isl_union_set *domain = isl_schedule_get_domain(schedule); isl_schedule_node *root = isl_schedule_get_root(schedule); isl_union_map *sched = isl_schedule_node_get_subtree_schedule_union_map(root); data->sched = sched; isl_schedule_node_free(root); /* Assign the scheduling space the same name as the statement. */ isl_union_set_foreach_set(domain, &extract_each_stmt_domain, data); isl_union_set_free(domain); data->sched = isl_union_map_free(data->sched); } /* Duplicate the polysa_dep. 
 */
__isl_give struct polysa_dep *polysa_dep_copy(__isl_keep struct polysa_dep *dep)
{
	/* Deep copy: every isl object gets its own reference.
	 * NOTE(review): malloc failure is not checked here. */
	struct polysa_dep *new_dep =
		(struct polysa_dep *)malloc(sizeof(struct polysa_dep));
	new_dep->src = isl_id_copy(dep->src);
	new_dep->dest = isl_id_copy(dep->dest);
	new_dep->disvec = isl_vec_copy(dep->disvec);
	new_dep->isl_dep = isl_basic_map_copy(dep->isl_dep);
	new_dep->type = dep->type;
	new_dep->src_sched_domain = isl_set_copy(dep->src_sched_domain);
	new_dep->dest_sched_domain = isl_set_copy(dep->dest_sched_domain);

	return new_dep;
}

/* This function extracts the raw and rar deps that have the dest access associated
 * with the current access.
 */
static int t2s_update_dep(__isl_keep pet_expr *expr, void *user)
{
	struct t2s_data *data = user;
	struct t2s_stmt_data *stmt_data = data->stmt_data;
	isl_id *id;

	id = isl_id_copy(expr->acc.ref_id);

	/* First scan: is this reference the destination of any RAW dep?
	 * (ids are compared by pointer, the usual isl idiom) */
	int n;
	for (n = 0; n < data->ndeps; n++) {
		struct polysa_dep *dep_i = data->deps[n];
		if (dep_i->dest == id && dep_i->type == POLYSA_DEP_RAW)
			break;
	}
	if (n != data->ndeps) {
		/* Open a new access group and collect every matching dep. */
		stmt_data->stmt_deps = (struct polysa_dep ***)realloc(
			stmt_data->stmt_deps,
			(stmt_data->n_acc_group + 1) * sizeof(struct polysa_dep **));
		stmt_data->stmt_deps[stmt_data->n_acc_group] = NULL;
		stmt_data->n_dep_per_acc_group = (int *)realloc(
			stmt_data->n_dep_per_acc_group,
			(stmt_data->n_acc_group + 1) * sizeof(int));
		stmt_data->n_dep_per_acc_group[stmt_data->n_acc_group] = 0;
		for (int i = 0; i < data->ndeps; i++) {
			struct polysa_dep *dep_i = data->deps[i];
			if (dep_i->dest == id && dep_i->type == POLYSA_DEP_RAW) {
				stmt_data->stmt_deps[stmt_data->n_acc_group] =
					(struct polysa_dep **)realloc(
						stmt_data->stmt_deps[stmt_data->n_acc_group],
						(stmt_data->n_dep_per_acc_group[stmt_data->n_acc_group] + 1) *
							sizeof(struct polysa_dep *));
				stmt_data->stmt_deps[stmt_data->n_acc_group][stmt_data->n_dep_per_acc_group[stmt_data->n_acc_group]] =
					polysa_dep_copy(dep_i);
				stmt_data->n_dep_per_acc_group[stmt_data->n_acc_group] += 1;
			}
		}
		stmt_data->n_acc_group += 1;
	}
	isl_id_free(id);

	return 0;
}

/*
Generate the t2s function for each array access. */
/* pet_tree_foreach_access_expr() callback: build the T2S access expression
 * for the reference "expr" and record it in the ref2expr table of the
 * ppcg_stmt currently being generated (stmts[stmt_num - 1]).
 *
 * If the reference is the dest of a RAW dep selected in the current
 * dep_stmt_pair, the access becomes src_func(c0, c1 - d1, ...), where the
 * offsets come from the dep's distance vector "disvec".  Otherwise the
 * identity access func(c0, c1, ..., c{iter_num-1}) is produced.
 * Always returns 0 so the traversal visits every access. */
static int t2s_update_access(__isl_keep pet_expr *expr, void *user)
{
  struct t2s_data *data = user;
  struct t2s_stmt_data *stmt_data = data->stmt_data;
  isl_id *id;
  isl_multi_pw_aff *index;
  isl_space *index_space; /* unused */
  isl_id *array_id;       /* unused */
  isl_ctx *ctx;
  isl_ast_expr *ast_expr;

  id = isl_id_copy(expr->acc.ref_id);
  index = isl_multi_pw_aff_copy(expr->acc.index);
  ctx = isl_id_get_ctx(id);
  /* "index" is only taken to get a reference; it is not used further. */
  isl_multi_pw_aff_free(index);

  /* If the access is associated with RAR, then generate access as
   * A(c0, c1, c2).
   * If the access is associated with RAW, then generate access as
   * A(c0, c1 - 1, c2).
   * Otherwise, generate A(c0, c1, c2).
   */
  isl_id *func = isl_id_to_id_get(data->ref2func, isl_id_copy(id));
  isl_ast_expr *func_expr = isl_ast_expr_from_id(func);
  isl_ast_expr_list *args = isl_ast_expr_list_alloc(ctx, 0);
  int i;
  for (i = 0; i < stmt_data->n_acc_group; i++) {
    struct polysa_dep *dep_i = stmt_data->dep_stmt_pair[i];
    if (dep_i->dest == id && dep_i->type == POLYSA_DEP_RAW) {
      /* Shift each iterator by the dep distance along that dimension. */
      for (int j = 0; j < data->iter_num; j++) {
        char iter_name[100];
        isl_val *ele = isl_vec_get_element_val(dep_i->disvec, j);
        if (isl_val_is_zero(ele)) {
          sprintf(iter_name, "c%d", j);
        } else {
          sprintf(iter_name, "c%d - %ld", j, isl_val_get_num_si(ele));
        }
        isl_id *arg = isl_id_alloc(ctx, iter_name, NULL);
        isl_ast_expr *arg_expr = isl_ast_expr_from_id(arg);
        args = isl_ast_expr_list_add(args, arg_expr);
        isl_val_free(ele);
      }
      /* Update the func_expr as the src func_expr.
       */
      func = isl_id_to_id_get(data->ref2func, isl_id_copy(dep_i->src));
      isl_ast_expr_free(func_expr);
      func_expr = isl_ast_expr_from_id(func);
      ast_expr = isl_ast_expr_access(func_expr, args);
      break;
    }
  }
  if (i == stmt_data->n_acc_group) {
    /* No RAW dep selected for this reference: identity access. */
    for (int j = 0; j < data->iter_num; j++) {
      char iter_name[100];
      sprintf(iter_name, "c%d", j);
      isl_id *arg = isl_id_alloc(ctx, iter_name, NULL);
      isl_ast_expr *arg_expr = isl_ast_expr_from_id(arg);
      args = isl_ast_expr_list_add(args, arg_expr);
    }
    ast_expr = isl_ast_expr_access(func_expr, args);
  }

  stmt_data->stmts[stmt_data->stmt_num - 1]->ref2expr = isl_id_to_ast_expr_set(stmt_data->stmts[stmt_data->stmt_num - 1]->ref2expr, id, ast_expr);

  return 0;
}

/* Free up the dependence.  NULL-safe; always returns NULL so callers can
 * write "dep = polysa_dep_free(dep);". */
void *polysa_dep_free(__isl_take struct polysa_dep *dep)
{
  if (!dep)
    return NULL;

  isl_id_free(dep->src);
  isl_id_free(dep->dest);
  isl_vec_free(dep->disvec);
  isl_set_free(dep->src_sched_domain);
  isl_set_free(dep->dest_sched_domain);
  isl_basic_map_free(dep->isl_dep);
  free(dep);
  return NULL;
}

/* Generate a T2S statement for each unique dependence pair.
 *
 * "dep_stmt_pair" holds one chosen dep per access group; this function takes
 * ownership of the array (but not of the deps it points to) and frees it on
 * every path.  The statement domain is the anchor domain intersected with
 * the dest domains of all chosen deps; if that intersection is empty the
 * pair produces no statement and isl_bool_false is returned.  On success a
 * new ppcg_stmt and its domain are appended to data->stmt_data and
 * isl_bool_true is returned. */
isl_bool gen_t2s_stmt(__isl_take struct polysa_dep **dep_stmt_pair, struct ppcg_stmt *stmt, struct t2s_data *data)
{
  struct t2s_stmt_data *stmt_data = data->stmt_data;
  isl_set *union_domain = isl_set_copy(data->stmt_data->stmt_anchor_domain);
  if (data->stmt_data->n_acc_group > 0) {
    for (int i = 0; i < data->stmt_data->n_acc_group; i++) {
      struct polysa_dep *dep_i = dep_stmt_pair[i];
      isl_set *dest_sched_domain = isl_set_copy(dep_i->dest_sched_domain);
      dest_sched_domain = isl_set_set_tuple_id(dest_sched_domain, isl_set_get_tuple_id(union_domain));
      union_domain = isl_set_intersect(union_domain, dest_sched_domain);
    }
    if (isl_set_is_empty(union_domain)) {
      free(dep_stmt_pair);
      isl_set_free(union_domain);
      return isl_bool_false;
    }
  }

  /* Simplify the domain.
   */
  isl_set *anchor_domain = isl_set_copy(data->anchor_domain);
  isl_space *space = isl_set_get_space(union_domain);
  anchor_domain = isl_set_set_tuple_name(anchor_domain, isl_space_get_tuple_name(space, isl_dim_set));
  union_domain = isl_set_gist(union_domain, anchor_domain);
  isl_space_free(space);

  /* Peel off the scalar dimensions. */
  union_domain = t2s_peel_off_scalar_dims(union_domain, data->schedule);

  /* Append a new ppcg_stmt (sharing the original pet statement) together
   * with its domain. */
  data->stmt_data->stmt_num += 1;
  data->stmt_data->stmts = (struct ppcg_stmt **)realloc(data->stmt_data->stmts, data->stmt_data->stmt_num * sizeof(struct ppcg_stmt *));
  data->stmt_data->stmts[data->stmt_data->stmt_num - 1] = (struct ppcg_stmt *)malloc(sizeof(struct ppcg_stmt));
  data->stmt_data->stmt_domain = (isl_set **)realloc(data->stmt_data->stmt_domain, data->stmt_data->stmt_num * sizeof(isl_set *));
  data->stmt_data->stmt_domain[data->stmt_data->stmt_num - 1] = union_domain;
  stmt_data->stmts[stmt_data->stmt_num - 1]->stmt = stmt->stmt;
  stmt_data->stmts[stmt_data->stmt_num - 1]->ref2expr = isl_id_to_ast_expr_alloc(data->ctx, 0);

  /* Produce the ref2expr for each access. */
  stmt_data->dep_stmt_pair = dep_stmt_pair;
  pet_tree_foreach_access_expr(stmt->stmt->body, &t2s_update_access, data);
  free(dep_stmt_pair);
  stmt_data->dep_stmt_pair = NULL;

  return isl_bool_true;
}

/* This function builds the ref2expr for each array reference in the user statement.
 * The LHS access A[][] is replaced by A(c0, c1, c2).
 * The RHS access B[][] is replaced by the flow dep B(c0 - 1, c1, c2).
 * If the RHS access is associated with multiple flow deps, we will need to split the statement into multiple T2S statements.
 * At present, we don't support stmts with more than one RHS access that are associated with flow deps.
 *
 * First determine how many unique ref2expr pairs are to be generated.
 * Then build the ref2expr for each pair.
 */
static isl_stat extract_t2s_stmt_access(__isl_take struct ppcg_stmt *stmt, struct t2s_data *data)
{
  struct t2s_stmt_data *stmt_data = data->stmt_data;
  /* Determine all the unique ref2expr pairs to be generated. */
  pet_tree_foreach_access_expr(stmt->stmt->body, &t2s_update_dep, data);
  /* One T2S statement per combination of (one dep per access group);
   * the total is the product of the group sizes (1 when no group). */
  int stmt_num = 1;
  for (int i = 0; i < stmt_data->n_acc_group; i++) {
    stmt_num *= stmt_data->n_dep_per_acc_group[i];
  }

  struct polysa_dep ***dep_stmt_pairs = NULL;
  dep_stmt_pairs = (struct polysa_dep ***)malloc(stmt_num * sizeof(struct polysa_dep **));
  for (int i = 0; i < stmt_num; i++) {
    dep_stmt_pairs[i] = (struct polysa_dep **)malloc(stmt_data->n_acc_group * sizeof(struct polysa_dep *));
  }
  /* Mixed-radix enumeration: fill column acc_group_i of every combination,
   * repeating each dep choice post_repeat times within prev_repeat outer
   * repetitions, so all cross products are produced exactly once. */
  int prev_repeat = 1;
  int post_repeat = stmt_num;
  int acc_group_i = 0;
  while (acc_group_i < stmt_data->n_acc_group) {
    int cur_n_dep_acc_group = stmt_data->n_dep_per_acc_group[acc_group_i];
    post_repeat /= cur_n_dep_acc_group;
    int id = 0;
    for (int i = 0; i < prev_repeat; i++)
      for (int j = 0; j < cur_n_dep_acc_group; j++)
        for (int k = 0; k < post_repeat; k++) {
          dep_stmt_pairs[id][acc_group_i] = stmt_data->stmt_deps[acc_group_i][j];
          id++;
        }
    acc_group_i++;
    prev_repeat *= cur_n_dep_acc_group;
  }

  /* Generate a separate ppcg_stmt for each dep pair.
   * gen_t2s_stmt() consumes dep_stmt_pairs[i] on every path; on failure
   * (empty domain) the remaining pairs are shifted down and the pair count
   * is reduced, and index i is revisited. */
  stmt_data->stmt_num = 0;
  for (int i = 0; i < stmt_num; i++) {
    isl_bool success = gen_t2s_stmt(dep_stmt_pairs[i], stmt, data);
    if (!success) {
      for (int j = i; j < stmt_num - 1; j++) {
        dep_stmt_pairs[j] = dep_stmt_pairs[j + 1];
      }
      stmt_num -= 1;
      i--;
    }
  }
  ppcg_stmt_free(stmt);
  free(dep_stmt_pairs);

  return isl_stat_ok;
}

//static char *concat(const char *s1, const char *s2)
//{
//  char *result = malloc(strlen(s1) + strlen(s2) + 1);
//  strcpy(result, s1);
//  strcat(result, s2);
//  return result;
//}

/* Print a "set" in T2S format.
 */
/* Render "set" as a T2S boolean guard string, e.g.
 * "(c0 >= 0 && N + -1 * c0 >= 0) || (...)": basic sets are joined with
 * "||", the constraints of each basic set with "&&", and each affine
 * constraint is printed as "<terms> == 0" or "<terms> >= 0".
 * TODO(review): existentially quantified dims (isl_dim_div) are not
 * handled (see TODO below), and a constraint whose variable coefficients
 * are all zero prints a leading " + " before its constant.
 * The caller owns (and must free) the returned string. */
static char *isl_set_to_t2s_format(__isl_keep isl_set *set)
{
  char *t2s_cst = NULL;
  int n_bset = isl_set_n_basic_set(set);
  int bset_id = 0;
  int multi_bset = n_bset > 1;
  isl_basic_set_list *bset_list = isl_set_get_basic_set_list(set);
  isl_printer *p = isl_printer_to_str(isl_set_get_ctx(set));

  while (bset_id < n_bset) {
    if (bset_id > 0) {
      p = isl_printer_print_str(p, " || ");
    }
    if (multi_bset) {
      p = isl_printer_print_str(p, "(");
    }
    /* Print the content of each basic map. */
    isl_basic_set *bset = isl_basic_set_list_get_basic_set(bset_list, bset_id);
    isl_constraint_list *cst_list = isl_basic_set_get_constraint_list(bset);
    int cst_id = 0;
    int n_cst = isl_basic_set_n_constraint(bset);
    int multi_cst = n_cst > 1;
    while (cst_id < n_cst) {
      if (cst_id > 0) {
        p = isl_printer_print_str(p, " && ");
      }
      if (multi_cst) {
        p = isl_printer_print_str(p, "(");
      }
      isl_constraint *cst_i = isl_constraint_list_get_constraint(cst_list, cst_id);
      /* TODO: consider isl_dim_div later. */
      /* is_first tracks whether a term has been emitted yet, to decide
       * whether " + " must be printed.  (The "is_first = 0" inside the
       * "!is_first" branch below is redundant — it is already 0 there.) */
      int is_first = 1;
      /* Terms over the set (iterator) dimensions. */
      for (int i = 0; i < isl_constraint_dim(cst_i, isl_dim_set); i++) {
        isl_val *val = isl_constraint_get_coefficient_val(cst_i, isl_dim_set, i);
        const char *name = isl_constraint_get_dim_name(cst_i, isl_dim_set, i);
        if (!isl_val_is_zero(val)) {
          if (!is_first) {
            p = isl_printer_print_str(p, " + ");
            is_first = 0;
          }
          if (!isl_val_is_one(val)) {
            p = isl_printer_print_val(p, val);
            p = isl_printer_print_str(p, " * ");
          }
          p = isl_printer_print_str(p, name);
          if (is_first)
            is_first = 0;
        }
        isl_val_free(val);
      }
      /* Terms over the parameter dimensions. */
      for (int i = 0; i < isl_constraint_dim(cst_i, isl_dim_param); i++) {
        isl_val *val = isl_constraint_get_coefficient_val(cst_i, isl_dim_param, i);
        const char *name = isl_constraint_get_dim_name(cst_i, isl_dim_param, i);
        if (!isl_val_is_zero(val)) {
          if (!is_first) {
            p = isl_printer_print_str(p, " + ");
            is_first = 0;
          }
          if (!isl_val_is_one(val)) {
            p = isl_printer_print_val(p, val);
            p = isl_printer_print_str(p, " * ");
          }
          p = isl_printer_print_str(p, name);
          if (is_first)
            is_first = 0;
        }
        isl_val_free(val);
      }
      /* Constant term. */
      isl_val *cst_val = isl_constraint_get_constant_val(cst_i);
      if (!isl_val_is_zero(cst_val)) {
        p = isl_printer_print_str(p, " + ");
        p = isl_printer_print_val(p, cst_val);
      }
      isl_val_free(cst_val);
      if (isl_constraint_is_equality(cst_i)) {
        p = isl_printer_print_str(p, " == 0");
      } else {
        p = isl_printer_print_str(p, " >= 0");
      }
      isl_constraint_free(cst_i);
      if (multi_cst) {
        p = isl_printer_print_str(p, ")");
      }
      cst_id++;
    }
    isl_constraint_list_free(cst_list);
    if (multi_bset) {
      p = isl_printer_print_str(p, ")");
    }
    isl_basic_set_free(bset);
    bset_id++;
  }
  isl_basic_set_list_free(bset_list);

  t2s_cst = isl_printer_get_str(p);
  isl_printer_free(p);

  return t2s_cst;
}

/* This function takes in the C statement "c_text" like
 * C[i][j] = 0
 * and the iteration domain "domain",
 * prints out the T2S statement like
 * C(i,j) = select(i == 0 && j == 0, C(i,j-1), C(i,j))
 *
 * Character-level rewriting: '[' ... ']' index lists (up to iter_num
 * dimensions) are turned into "(...)" argument lists, '=' introduces the
 * "select(<domain>, " wrapper (when the domain is non-empty), and ';'
 * closes it.  Consumes both "c_text" and "domain". */
static __isl_give char *c_to_t2s_stmt(__isl_take char *c_text, __isl_take isl_set *domain, int iter_num)
{
  char ch;
  int loc = 0;
  int insert_select; /* unused */
  char *iter_domain = NULL;
  char *t2s_text = NULL;
  isl_printer *p = isl_printer_to_str(isl_set_get_ctx(domain));
  char *LHS_func = NULL;
  int at_LHS = 1;
  isl_ctx *ctx = isl_set_get_ctx(domain);
  isl_printer *p_LHS = isl_printer_to_str(ctx);

  /* Generate the iteration domain constructs in T2S format. */
  if (!isl_set_is_empty(domain)) {
    /* Set up the iterators.
     */
    for (int i = 0; i < isl_set_dim(domain, isl_dim_set); i++) {
      char iter_name[100];
      sprintf(iter_name, "c%d", i);
      isl_id *id_i = isl_id_alloc(ctx, iter_name, NULL);
      domain = isl_set_set_dim_id(domain, isl_dim_set, i, id_i);
    }
    iter_domain = isl_set_to_t2s_format(domain);
  }

  while ((ch = c_text[loc]) != '\0') {
    if (ch == '=') {
      /* Skip the spaces after '='; ch ends up holding the first RHS
       * character, which is emitted by the common print at the bottom. */
      while ((ch = c_text[++loc]) == ' ') {
        ;
      }
      p = isl_printer_print_str(p, "= ");
      if (iter_domain) {
        p = isl_printer_print_str(p, "select(");
        p = isl_printer_print_str(p, iter_domain);
        p = isl_printer_print_str(p, ", ");
      }
    } else if (ch == ';') {
      if (iter_domain) {
//        p = isl_printer_print_str(p, ", ");
//        p = isl_printer_print_str(p, LHS_func);
        p = isl_printer_print_str(p, ")");
      }
    } else if (ch == '[') {
      /* Convert A[i][j]... into A(i, j, ...), consuming at most iter_num
       * bracketed dimensions. */
      isl_printer *p_func = isl_printer_to_str(isl_set_get_ctx(domain));
      p_func = isl_printer_print_str(p_func, "(");
      loc++;
      int dim_cnt = 0;
      while ((ch = c_text[loc]) && (dim_cnt < iter_num)) {
        if (ch == ']') {
          dim_cnt++;
          if (c_text[loc + 1] == '[')
            p_func = isl_printer_print_str(p_func, ", ");
          else
            p_func = isl_printer_print_str(p_func, ")");
        } else if (ch == '[') {
          loc++;
          continue;
        } else {
          char ch_str[2];
          ch_str[0] = ch;
          ch_str[1] = '\0';
          p_func = isl_printer_print_str(p_func, ch_str);
        }
        loc++;
      }
      char *func_str = isl_printer_get_str(p_func);
      p = isl_printer_print_str(p, func_str);
      /* The first converted access is the LHS function reference. */
      if (at_LHS) {
        p_LHS = isl_printer_print_str(p_LHS, func_str);
        LHS_func = isl_printer_get_str(p_LHS);
        at_LHS = 0;
      }
      free(func_str);
      isl_printer_free(p_func);
    }
    char ch_str[2];
    ch_str[0] = ch;
    ch_str[1] = '\0';
    p = isl_printer_print_str(p, ch_str);
    if (at_LHS) {
      p_LHS = isl_printer_print_str(p_LHS, ch_str);
    }
    loc++;
  }
  t2s_text = isl_printer_get_str(p);
  isl_printer_free(p);
  isl_printer_free(p_LHS);
  free(iter_domain);
  free(LHS_func);
  free(c_text);
  isl_set_free(domain);

  return t2s_text;
}

/* Generate the number that denotes how many times the given function has been updated.
*/ static int get_t2s_URE_update_level(struct t2s_URE **UREs, int URE_num, __isl_take char *func_name) { char **URE_names = NULL; if (URE_num > 0) { URE_names = (char **)malloc(URE_num * sizeof(char *)); for (int i = 0; i < URE_num; i++) { URE_names[i] = strdup(UREs[i]->name); } } int update_level = -1; for (int i = 0; i < URE_num; i++) { char *cur_name = URE_names[i]; if (strlen(cur_name) >= strlen(func_name)) { char cur_name_prefix[strlen(cur_name) + 1]; char ch; int loc = 0; while ((ch = cur_name[loc]) != '\0') { if (ch == '.') break; else { cur_name_prefix[loc] = cur_name[loc]; loc++; } } cur_name_prefix[loc] = '\0'; if (!strcmp(cur_name_prefix, func_name)) update_level++; } } if (URE_num > 0) { for (int i = 0; i < URE_num; i++) { free(URE_names[i]); } } free(URE_names); free(func_name); return update_level; } /* Given the func name, update the URE name and the update_level. */ static __isl_give struct t2s_URE *create_t2s_URE(__isl_keep struct t2s_URE **UREs, int URE_num, __isl_take char *func_name, __isl_take char *URE_text, int d, isl_ctx *ctx) { struct t2s_URE *URE = (struct t2s_URE *)malloc(sizeof(struct t2s_URE)); char **URE_names = NULL; if (URE_num > 0) { URE_names = (char **)malloc(URE_num * sizeof(char *)); for (int i = 0; i < URE_num; i++) { URE_names[i] = strdup(UREs[i]->name); } } int update_level = -1; for (int i = 0; i < URE_num; i++) { char *cur_name = URE_names[i]; if (strlen(cur_name) >= strlen(func_name)) { char cur_name_prefix[strlen(cur_name) + 1]; char ch; int loc = 0; while ((ch = cur_name[loc]) != '\0') { if (ch == '.') break; else { cur_name_prefix[loc] = cur_name[loc]; loc++; } } cur_name_prefix[loc] = '\0'; if (!strcmp(cur_name_prefix, func_name)) update_level++; } } isl_printer *p = isl_printer_to_str(ctx); p = isl_printer_print_str(p, func_name); if (update_level >= 0) { p = isl_printer_print_str(p, ".update("); p = isl_printer_print_int(p, update_level); p = isl_printer_print_str(p, ")"); } URE->name = isl_printer_get_str(p); 
isl_printer_free(p); URE->d = d; URE->text = URE_text; URE->update_level = update_level; if (URE_num > 0) { for (int i = 0; i < URE_num; i++) { free(URE_names[i]); } } free(URE_names); free(func_name); return URE; } /* Create the T2S URE from the statement text. */ static isl_stat create_t2s_URE_from_text(struct t2s_data *data, __isl_take char *URE_text, int d, isl_ctx *ctx) { char *func_name; char ch; int loc = 0; isl_printer *p = isl_printer_to_str(ctx); char *func_name_tmp; struct t2s_URE **UREs = data->URE; int URE_num = data->URE_num; while ((ch = URE_text[loc]) != '\0') { if (ch == '=') break; char ch_arr[2]; ch_arr[0] = ch; ch_arr[1] = '\0'; p = isl_printer_print_str(p, ch_arr); loc++; } func_name_tmp = isl_printer_get_str(p); isl_printer_free(p); loc = strlen(func_name_tmp) - 1; while((ch = func_name_tmp[loc--]) != ' ') { ; } char *func_decl = (char *)malloc(sizeof(char) * (loc + 1 + 1)); strncpy(func_decl, func_name_tmp, loc + 1); func_decl[loc + 1] = '\0'; while((ch = func_name_tmp[loc--]) != '(') { ; } func_name = (char *)malloc(sizeof(char) * (loc + 1 + 1)); strncpy(func_name, func_name_tmp, loc + 1); func_name[loc + 1] = '\0'; free(func_name_tmp); int update_level = get_t2s_URE_update_level(UREs, URE_num, strdup(func_name)); // if (update_level == -1) { // p = isl_printer_to_str(ctx); // p = isl_printer_print_str(p, func_decl); // p = isl_printer_print_str(p, " = 0;\n"); // char *init_URE_text = isl_printer_get_str(p); // isl_printer_free(p); // // data->URE = (struct t2s_URE **)realloc(data->URE, sizeof(struct t2s_URE *) * (data->URE_num + 1)); // data->URE[data->URE_num] = create_t2s_URE(data->URE, data->URE_num, strdup(func_name), init_URE_text, d, ctx); // data->URE_num++; // } /* Add the statement URE. 
*/ data->URE = (struct t2s_URE **)realloc(data->URE, sizeof(struct t2s_URE *) * (data->URE_num + 1)); data->URE[data->URE_num] = create_t2s_URE(data->URE, data->URE_num, strdup(func_name), URE_text, d, ctx); data->URE_num++; free(func_name); free(func_decl); return isl_stat_ok; } /* Free up the t2s_stmt_data. */ static __isl_null struct t2s_stmt_data *t2s_stmt_data_free(__isl_take struct t2s_stmt_data *d) { if (!d) return NULL; for (int i = 0; i < d->stmt_num; i++) { ppcg_stmt_free(d->stmts[i]); isl_set_free(d->stmt_domain[i]); } free(d->stmts); free(d->stmt_domain); isl_set_free(d->stmt_anchor_domain); isl_pw_multi_aff_free(d->iterator_map); for (int i = 0; i < d->n_acc_group; i++) { for (int j = 0; j < d->n_dep_per_acc_group[i]; j++) { polysa_dep_free(d->stmt_deps[i][j]); } free(d->stmt_deps[i]); } free(d->stmt_deps); free(d->n_dep_per_acc_group); free(d); return NULL; } /* Buggy Implementation. */ ///* For each user statement, there will be multiple T2S UREs generated given different // * dependences. To improve the hardware efficiency and code readability, there UREs // * will be merged into one UREs in this function. // * For example, given two UREs: // * A(i, j, k) = select(D1, A(i, j, k) + B(i, j, k)); // * A(i, j, k) = select(D2, A(i, j, k - 1) + B(i, j, k)); // * We will merge them into one URE below: // * A(i, j, k) = select(D1, A(i, j, k), select(D2, A(i, j, k - 1))) + // * select(D1, B(i, j, k), select(D2, B(i, j, k))); // */ //char *merge_t2s_stmt_text(__isl_take char **stmt_texts, int n, isl_ctx *ctx) { // char **iter_domain = (char **)malloc(sizeof(char *) * n); // int n_func = 0; // // /* Collect number of functions in the statement. 
*/ // char *text = stmt_texts[0]; // char ch; // int loc = 0; // bool is_func = true; // while((ch = text[loc]) != '\0') { // if (ch == '(') { // ch = text[++loc]; // while(ch != '(' && ch != ')') { // ch = text[++loc]; // if (ch == '=' || ch == '>' || ch == '<') // is_func = false; // } // if (ch == ')') { // if (is_func) // n_func++; // is_func = true; // } else if (ch == '(') { // loc--; // } // } // loc++; // } // // char ***func = (char ***)malloc(sizeof(char **) * n_func); // for (int i = 0; i < n_func; i++) { // func[i] = (char **)malloc(sizeof(char *) * n); // } // int* func_offset = (int *)malloc(sizeof(int) * n_func); // // /* Collect all the iteration domains and func names. */ // for (int i = 0; i < n; i++) { // char *text = stmt_texts[i]; // char ch; // int loc = 0; // int func_id = 0; // while((ch = text[loc]) != '\0') { // if (ch == 's') { // char token[6]; // if (loc + 6 <= strlen(text)) { // strncpy(token, text + loc, 6); // } // if (!strcmp(token, "select")) { // /* Collect the iteration domain. */ // isl_printer *p_str = isl_printer_to_str(ctx); // loc += 6; // loc += 1; // while (ch = text[loc] != ',') { // char ch_arr[2]; // ch_arr[0] = ch; // ch_arr[1] = '\0'; // p_str = isl_printer_print_str(p_str, ch_arr); // loc++; // } // iter_domain[i] = isl_printer_get_str(p_str); // isl_printer_free(p_str); // } // } // // /* Collect the func names. 
*/ // if (ch == '(') { // while (loc >= 0 && ((ch = text[loc]) != ' ')) // loc--; // loc++; // isl_printer *p_str = isl_printer_to_str(ctx); // while((ch = text[loc]) != '(') { // char ch_arr[2]; // ch_arr[0] = ch; // ch_arr[1] = '\0'; // p_str = isl_printer_print_str(p_str, ch_arr); // loc++; // } // // int loc_cur = loc; // char ch_arr[2]; // ch_arr[0] = ch; // ch_arr[1] = '\0'; // p_str = isl_printer_print_str(p_str, ch_arr); // loc++; // while((ch = text[loc]) != ')') { // char ch_arr[2]; // ch_arr[0] = ch; // ch_arr[1] = '\0'; // p_str = isl_printer_print_str(p_str, ch_arr); // if (ch == '(') { // loc--; // p_str = isl_printer_free(p_str); // break; // } // loc++; // } // if (p_str) { // p_str = isl_printer_print_str(p_str, ")"); // func[func_id][i] = isl_printer_get_str(p_str); // if (i == 0) // func_offset[func_id] = loc - strlen(func[func_id][i]) + 1; // func_id++; // p_str = isl_printer_free(p_str); // } // } // loc++; // } // } // // /* Scan through the statement text and plug in the functions and domains. 
*/ // loc = 0; // text = stmt_texts[0]; // isl_printer *p_str = isl_printer_to_str(ctx); // int func_cnt = 0; // while ((ch = text[loc]) != '\0') { // if (loc == func_offset[func_cnt]) { // if (func_cnt == 0) // p_str = isl_printer_print_str(p_str, func[func_cnt][0]); // else { // for (int i = 0; i < n; i++) { // if (i > 0) // p_str = isl_printer_print_str(p_str, ", "); // p_str = isl_printer_print_str(p_str, "select("); // p_str = isl_printer_print_str(p_str, iter_domain[i]); // p_str = isl_printer_print_str(p_str, ", "); // p_str = isl_printer_print_str(p_str, func[func_cnt][i]); // } // for (int i = 0; i < n; i++) { // p_str = isl_printer_print_str(p_str, ")"); // } // } // loc += strlen(func[func_cnt][0]); // func_cnt++; // } else { // char ch_arr[2]; // ch_arr[0] = ch; // ch_arr[1] = '\0'; // p_str = isl_printer_print_str(p_str, ch_arr); // } // loc++; // } // // char *merge_text = isl_printer_get_str(p_str); // isl_printer_free(p_str); // for (int i = 0; i < n; i++) { // free(stmt_texts[i]); // } // free(stmt_texts); // // return merge_text; //} /* Buggy implementation. */ ///* Transform each user statement in the original program to a T2S URE // * w/ URE simplification. // * In this function, only one URE is generated for each user statement. // */ //static __isl_give isl_schedule_node *gen_stmt_text_single(__isl_take isl_schedule_node *node, void *user) //{ // struct ppcg_stmt *stmt; // isl_set *domain; // isl_space *space; // isl_id *id; // struct t2s_data *data = user; // struct t2s_stmt_data *stmt_data; // // if (!node) // return NULL; // // if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf) // return node; // // isl_ctx *ctx = isl_schedule_node_get_ctx(node); // // /* Find the stmt. 
*/ // stmt = isl_calloc_type(data->ctx, struct ppcg_stmt); // domain = isl_set_from_union_set(isl_schedule_node_get_domain(node)); // space = isl_set_get_space(domain); // id = isl_space_get_tuple_id(space, isl_dim_set); // stmt->stmt = find_stmt(data->scop, id); // isl_space_free(space); // isl_set_free(domain); // // /* Decide if there will be multiple T2S stmts generated for one stmt. // * Construct the unique acc->func mapping for each T2S stmts.*/ // stmt_data = isl_calloc_type(data->ctx, struct t2s_stmt_data); // stmt_data->stmt_num = 0; // stmt_data->stmts = NULL; // stmt_data->stmt_domain = NULL; // stmt_data->stmt_deps = NULL; // stmt_data->n_acc_group = 0; // stmt_data->n_dep_per_acc_group = 0; // stmt_data->dep_stmt_pair = NULL; // stmt_data->iterator_map = NULL; // isl_set_list *stmt_domain_list = isl_union_set_get_set_list(data->stmt_domain); // for (int i = 0; i < isl_union_set_n_set(data->stmt_domain); i++) { // isl_set *stmt_domain_i = isl_set_list_get_set(stmt_domain_list, i); // isl_space *space_i = isl_set_get_space(stmt_domain_i); // isl_id *id_i = isl_space_get_tuple_id(space_i, isl_dim_set); // if (id_i == id) // stmt_data->stmt_anchor_domain = isl_set_copy(stmt_domain_i); // isl_set_free(stmt_domain_i); // isl_space_free(space_i); // isl_id_free(id_i); // } // isl_set_list_free(stmt_domain_list); // isl_id_free(id); // // data->stmt_data = stmt_data; // /* Extract the ref2expr for each access. */ // extract_t2s_stmt_access(stmt, data); // // data->URE = (struct t2s_URE **)realloc(data->URE, sizeof(struct t2s_URE *) * (data->URE_num + data->stmt_data->stmt_num)); // char **stmt_texts = isl_calloc_array(data->ctx, char *, data->stmt_data->stmt_num); // // /* Print the stmt to data->t2s_stmt_text and update data->t2s_stmt_num. 
 */
//  for (int i = 0; i < data->stmt_data->stmt_num; i++) {
//    isl_printer *p_str = isl_printer_to_str(data->ctx);
//    p_str = isl_printer_set_output_format(p_str, ISL_FORMAT_C);
//    struct ppcg_stmt *stmt_i = data->stmt_data->stmts[i];
//    p_str = pet_stmt_print_body(stmt_i->stmt, p_str, stmt_i->ref2expr);
//    char *stmt_text = isl_printer_get_str(p_str);
//    stmt_texts[i] = c_to_t2s_stmt(stmt_text, isl_set_copy(data->stmt_data->stmt_domain[i]), data->iter_num);
////    create_t2s_URE_from_text(data, stmt_text, 0, ctx);
//    isl_printer_free(p_str);
//  }
//  char *merged_stmt_text = merge_t2s_stmt_text(stmt_texts, data->stmt_data->stmt_num, data->ctx);
//  create_t2s_URE_from_text(data, merged_stmt_text, 0, ctx);
//
//  data->stmt_data = t2s_stmt_data_free(stmt_data);
//
//  return node;
//}

/* Transform each user statement in the original program to a T2S URE.
 * w/o URE simplification.
 * In this function, multiple UREs can be generated for each user statement.
 *
 * Schedule-tree map callback: only leaf nodes are processed (all other node
 * types are returned untouched).  For each leaf, the corresponding pet
 * statement is located, all per-dep T2S statements are generated via
 * extract_t2s_stmt_access(), each one is printed to C text and converted to
 * a T2S URE, and the per-statement scratch data is freed again. */
static __isl_give isl_schedule_node *gen_stmt_text(__isl_take isl_schedule_node *node, void *user)
{
  struct ppcg_stmt *stmt;
  isl_set *domain;
  isl_space *space;
  isl_id *id;
  struct t2s_data *data = user;
  struct t2s_stmt_data *stmt_data;

  if (!node)
    return NULL;

  if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
    return node;

  isl_ctx *ctx = isl_schedule_node_get_ctx(node);

//  // debug
//  isl_printer *p = isl_printer_to_file(data->ctx, stdout);
//  p = isl_printer_set_yaml_style(p, ISL_YAML_STYLE_BLOCK);
//  p = isl_printer_print_schedule_node(p, node);
//  printf("\n");
//  // debug

  /* Find the stmt.
   */
  stmt = isl_calloc_type(data->ctx, struct ppcg_stmt);
  domain = isl_set_from_union_set(isl_schedule_node_get_domain(node));
  space = isl_set_get_space(domain);
  id = isl_space_get_tuple_id(space, isl_dim_set);
  stmt->stmt = find_stmt(data->scop, id);
  isl_space_free(space);
  isl_set_free(domain);

  /* Decide if there will be multiple T2S stmts generated for one stmt.
   * Construct the unique acc->func mapping for each T2S stmts.*/
  stmt_data = isl_calloc_type(data->ctx, struct t2s_stmt_data);
  stmt_data->stmt_num = 0;
  stmt_data->stmts = NULL;
  stmt_data->stmt_domain = NULL;
  stmt_data->stmt_deps = NULL;
  stmt_data->n_acc_group = 0;
  stmt_data->n_dep_per_acc_group = 0;
  stmt_data->dep_stmt_pair = NULL;
  stmt_data->iterator_map = NULL;
  /* Pick the anchor domain of this statement out of data->stmt_domain by
   * matching the tuple id (pointer comparison — isl ids are uniquified
   * per context). */
  isl_set_list *stmt_domain_list = isl_union_set_get_set_list(data->stmt_domain);
  for (int i = 0; i < isl_union_set_n_set(data->stmt_domain); i++) {
    isl_set *stmt_domain_i = isl_set_list_get_set(stmt_domain_list, i);
    isl_space *space_i = isl_set_get_space(stmt_domain_i);
    isl_id *id_i = isl_space_get_tuple_id(space_i, isl_dim_set);
    if (id_i == id)
      stmt_data->stmt_anchor_domain = isl_set_copy(stmt_domain_i);
    isl_set_free(stmt_domain_i);
    isl_space_free(space_i);
    isl_id_free(id_i);
  }
  isl_set_list_free(stmt_domain_list);
  isl_id_free(id);

//  // debug
//  isl_printer *p = isl_printer_to_file(data->ctx, stdout);
//  p = isl_printer_print_set(p, stmt_data->stmt_anchor_domain);
//  printf("\n");
//  // debug

  data->stmt_data = stmt_data;
  /* Extract the ref2expr for each access. */
  extract_t2s_stmt_access(stmt, data);

  data->URE = (struct t2s_URE **)realloc(data->URE, sizeof(struct t2s_URE *) * (data->URE_num + data->stmt_data->stmt_num));

  /* Print the stmt to data->t2s_stmt_text and update data->t2s_stmt_num.
   */
  for (int i = 0; i < data->stmt_data->stmt_num; i++) {
    isl_printer *p_str = isl_printer_to_str(data->ctx);
    p_str = isl_printer_set_output_format(p_str, ISL_FORMAT_C);
    struct ppcg_stmt *stmt_i = data->stmt_data->stmts[i];
    p_str = pet_stmt_print_body(stmt_i->stmt, p_str, stmt_i->ref2expr);

//    // debug
//    isl_printer *p_debug = isl_printer_to_file(data->ctx, stdout);
//    p_debug = isl_printer_print_set(p_debug, data->stmt_data->stmt_domain[i]);
//    printf("\n");
//    // debug

    char *stmt_text = isl_printer_get_str(p_str);
    /* c_to_t2s_stmt consumes stmt_text and returns a fresh string. */
    stmt_text = c_to_t2s_stmt(stmt_text, isl_set_copy(data->stmt_data->stmt_domain[i]), data->iter_num);
    create_t2s_URE_from_text(data, stmt_text, 0, ctx);
    isl_printer_free(p_str);
  }

  data->stmt_data = t2s_stmt_data_free(stmt_data);

  return node;
}

/* Generate the array access from isl_multi_pw_aff "mpa".
 * Textually converts the printed mpa "Name[...] -> (i0, i1)" style output
 * into a C-like access string "Name[i0][i1]"-ish by re-bracketing the
 * parenthesized index expressions.  Consumes "mpa"; caller frees the
 * returned string. */
static __isl_give char *array_acc_from_multi_pw_aff(__isl_take isl_multi_pw_aff *mpa)
{
  isl_ctx *ctx;
  ctx = isl_multi_pw_aff_get_ctx(mpa);
  isl_space *space = isl_multi_pw_aff_get_space(mpa);
  const char *array_name = isl_space_get_tuple_name(space, isl_dim_out);

  isl_printer *p_str = isl_printer_to_str(ctx);
  p_str = isl_printer_print_multi_pw_aff(p_str, mpa);
  char *mpa_str = isl_printer_get_str(p_str);
  isl_printer_free(p_str);

  p_str = isl_printer_to_str(ctx);
  p_str = isl_printer_print_str(p_str, array_name);
  char ch;
  int loc = 0;
  while ((ch = mpa_str[loc]) != '\0') {
    if (ch == '(') {
      /* Skip any nested opening parens, then copy the index expression up
       * to the matching ')' inside square brackets. */
      while ((ch = mpa_str[++loc]) == '(')
        ;
      loc--;
      p_str = isl_printer_print_str(p_str, "[");
      while ((ch = mpa_str[++loc]) != ')') {
        char ch_arr[2];
        ch_arr[0] = ch;
        ch_arr[1] = '\0';
        p_str = isl_printer_print_str(p_str, ch_arr);
      }
      p_str = isl_printer_print_str(p_str, "]");
    }
    loc++;
  }
  char *acc_str = isl_printer_get_str(p_str);
  free(mpa_str);
  isl_printer_free(p_str);
  isl_multi_pw_aff_free(mpa);
  isl_space_free(space);

  return acc_str;
}

/* Set the iterator names in the target domain.
*/ static __isl_give isl_set *t2s_set_set_iters(__isl_take isl_set *s) { isl_ctx *ctx = isl_set_get_ctx(s); for (int i = 0; i < isl_set_dim(s, isl_dim_set); i++) { char iter_name[100]; sprintf(iter_name, "c%d", i); isl_id *id_i = isl_id_alloc(ctx, iter_name, NULL); s = isl_set_set_dim_id(s, isl_dim_set, i, id_i); } return s; } /* Set up the iterator names. */ static __isl_give isl_multi_pw_aff *t2s_set_multi_pw_aff_iters(__isl_take isl_multi_pw_aff *mpa) { isl_ctx *ctx = isl_multi_pw_aff_get_ctx(mpa); for (int i = 0; i < isl_multi_pw_aff_dim(mpa, isl_dim_in); i++) { char iter_name[100]; sprintf(iter_name, "c%d", i); isl_id *id_i = isl_id_alloc(ctx, iter_name, NULL); mpa = isl_multi_pw_aff_set_dim_id(mpa, isl_dim_in, i, id_i); } return mpa; } /* Create UREs for live-in accesses associated with RAR deps. */ static int t2s_rar_URE_access(__isl_keep pet_expr *expr, void *user) { struct t2s_data *data = user; struct t2s_stmt_data *stmt_data = data->stmt_data; isl_multi_pw_aff *index; isl_id *id; id = isl_id_copy(expr->acc.ref_id); index = isl_multi_pw_aff_copy(expr->acc.index); struct polysa_dep *dep; int n; isl_ctx *ctx = data->ctx; char *URE_text; for (n = 0; n < data->ndeps; n++) { dep = data->deps[n]; if (dep->dest == id && dep->type == POLYSA_DEP_RAR) break; } if (n != data->ndeps) { isl_set *stmt_domain = isl_set_copy(stmt_data->stmt_anchor_domain); isl_set *dep_dest_domain = isl_set_copy(dep->dest_sched_domain); /* Generate the init domain */ isl_set *init_domain = isl_set_subtract(stmt_domain, isl_set_copy(dep_dest_domain)); isl_set *anchor_domain = isl_set_copy(data->anchor_domain); anchor_domain = isl_set_set_tuple_name(anchor_domain, isl_set_get_tuple_name(init_domain)); init_domain = isl_set_gist(init_domain, isl_set_copy(anchor_domain)); isl_set *reuse_domain = isl_set_gist(dep_dest_domain, anchor_domain); /* Peel off the scalar dimensions */ init_domain = t2s_peel_off_scalar_dims(init_domain, data->schedule); reuse_domain = 
t2s_peel_off_scalar_dims(reuse_domain, data->schedule); /* Set up the iterator names. */ init_domain = t2s_set_set_iters(init_domain); reuse_domain = t2s_set_set_iters(reuse_domain); char *init_domain_str = isl_set_to_t2s_format(init_domain); isl_set_free(init_domain); char *reuse_domain_str = isl_set_to_t2s_format(reuse_domain); isl_set_free(reuse_domain); /* Generate the func name .*/ isl_id *func = isl_id_to_id_get(data->ref2func, isl_id_copy(id)); isl_printer *p_str = isl_printer_to_str(ctx); p_str = isl_printer_print_id(p_str, func); char *func_name = isl_printer_get_str(p_str); isl_printer_free(p_str); p_str = isl_printer_to_str(ctx); p_str = isl_printer_print_id(p_str, func); p_str = isl_printer_print_str(p_str, "("); for (int i = 0; i < data->iter_num; i++) { if (i > 0) { p_str = isl_printer_print_str(p_str, ", "); } char iter_name[100]; sprintf(iter_name, "c%d", i); p_str = isl_printer_print_str(p_str, iter_name); } p_str = isl_printer_print_str(p_str, ")"); char *func_str = isl_printer_get_str(p_str); isl_printer_free(p_str); p_str = isl_printer_to_str(ctx); p_str = isl_printer_print_id(p_str, func); p_str = isl_printer_print_str(p_str, "("); for (int i = 0; i < data->iter_num; i++) { if (i > 0) { p_str = isl_printer_print_str(p_str, ", "); } char iter_name[100]; isl_val *ele = isl_vec_get_element_val(dep->disvec, i); if (isl_val_is_zero(ele)) { sprintf(iter_name, "c%d", i); } else { sprintf(iter_name, "c%d - %ld", i, isl_val_get_num_si(ele)); } p_str = isl_printer_print_str(p_str, iter_name); isl_val_free(ele); } p_str = isl_printer_print_str(p_str, ")"); char *reuse_func_str = isl_printer_get_str(p_str); isl_printer_free(p_str); /* Generate the transformed index. */ isl_multi_pw_aff *trans_index = isl_multi_pw_aff_copy(index); trans_index = isl_multi_pw_aff_pullback_pw_multi_aff(trans_index, isl_pw_multi_aff_copy(stmt_data->iterator_map)); /* Set up the iterator names. 
*/ trans_index = t2s_set_multi_pw_aff_iters(trans_index); char *acc_str = array_acc_from_multi_pw_aff(trans_index); /* Generate the URE. */ int update_level = get_t2s_URE_update_level(data->URE, data->URE_num, strdup(func_name)); /* Comment out. The latest T2S no longer requires this. */ // if (update_level == -1) { // p_str = isl_printer_to_str(ctx); // p_str = isl_printer_print_str(p_str, func_str); // p_str = isl_printer_print_str(p_str, " = 0;\n"); // URE_text = isl_printer_get_str(p_str); // isl_printer_free(p_str); // // data->URE = (struct t2s_URE **)realloc(data->URE, sizeof(struct t2s_URE *) * (data->URE_num + 1)); // data->URE[data->URE_num] = create_t2s_URE(data->URE, data->URE_num, strdup(func_name), URE_text, 0, ctx); // data->URE_num++; // } p_str = isl_printer_to_str(ctx); p_str = isl_printer_print_str(p_str, func_str); p_str = isl_printer_print_str(p_str, " = "); p_str = isl_printer_print_str(p_str, "select("); p_str = isl_printer_print_str(p_str, init_domain_str); p_str = isl_printer_print_str(p_str, ", "); p_str = isl_printer_print_str(p_str, acc_str); p_str = isl_printer_print_str(p_str, ", select("); p_str = isl_printer_print_str(p_str, reuse_domain_str); p_str = isl_printer_print_str(p_str, ", "); p_str = isl_printer_print_str(p_str, reuse_func_str); // p_str = isl_printer_print_str(p_str, ", "); // p_str = isl_printer_print_str(p_str, func_str); p_str = isl_printer_print_str(p_str, "));\n"); URE_text = isl_printer_get_str(p_str); isl_printer_free(p_str); // data->t2s_stmt_text = (char **)realloc(data->t2s_stmt_text, sizeof(char *) * (data->t2s_stmt_num + 1)); // data->t2s_stmt_text[data->t2s_stmt_num] = URE_text; // data->t2s_stmt_num++; data->URE = (struct t2s_URE **)realloc(data->URE, sizeof(struct t2s_URE *) * (data->URE_num + 1)); data->URE[data->URE_num] = create_t2s_URE(data->URE, data->URE_num, strdup(func_name), URE_text, 0, ctx); data->URE_num++; isl_id_free(func); free(func_str); free(func_name); free(init_domain_str); free(acc_str); 
    free(reuse_domain_str);
    free(reuse_func_str);
  }

  isl_id_free(id);
  isl_multi_pw_aff_free(index);

  return 0;
}

/* Extract UREs for live-in accesses.
 * "stmt" is consumed.  Every access expression in the statement body is
 * visited by t2s_rar_URE_access, which appends the reuse (RAR) UREs to
 * data->URE. */
static isl_stat extract_rar_URE(__isl_take struct ppcg_stmt *stmt, struct t2s_data *data)
{
  pet_tree_foreach_access_expr(stmt->stmt->body, &t2s_rar_URE_access, data);
  ppcg_stmt_free(stmt);
  return isl_stat_ok;
}

/* Generate the reuse (RAR) UREs for the operands in the user statements.
 * Schedule-node callback: only leaf nodes are processed; all other nodes
 * are returned unchanged. */
static __isl_give isl_schedule_node *gen_op_stmt_text(__isl_take isl_schedule_node *node, void *user)
{
  struct t2s_data *data = user;
  struct t2s_stmt_data *stmt_data;
  struct ppcg_stmt *stmt;
  isl_set *domain;
  isl_space *space;
  isl_id *id;

  if (!node)
    return NULL;
  if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
    return node;

  /* Find the statement. */
  stmt = isl_calloc_type(data->ctx, struct ppcg_stmt);
  domain = isl_set_from_union_set(isl_schedule_node_get_domain(node));
  space = isl_set_get_space(domain);
  id = isl_space_get_tuple_id(space, isl_dim_set);
  stmt->stmt = find_stmt(data->scop, id);
  isl_space_free(space);
  isl_set_free(domain);

  /* Per-statement scratch data consumed by the access callbacks. */
  stmt_data = isl_calloc_type(data->ctx, struct t2s_stmt_data);
  stmt_data->stmt_num = 0;
  stmt_data->stmts = NULL;
  stmt_data->stmt_domain = NULL;
  stmt_data->stmt_deps = NULL;
  stmt_data->n_acc_group = 0;
  stmt_data->n_dep_per_acc_group = 0;
  stmt_data->dep_stmt_pair = NULL;
  stmt_data->iterator_map = NULL;

  /* Locate this statement's domain in data->stmt_domain (matched by
   * tuple id) and record a copy as the anchor domain. */
  isl_set_list *stmt_domain_list = isl_union_set_get_set_list(data->stmt_domain);
  for (int i = 0; i < isl_union_set_n_set(data->stmt_domain); i++) {
    isl_set *stmt_domain_i = isl_set_list_get_set(stmt_domain_list, i);
    isl_space *space_i = isl_set_get_space(stmt_domain_i);
    isl_id *id_i = isl_space_get_tuple_id(space_i, isl_dim_set);
    if (id_i == id)
      stmt_data->stmt_anchor_domain = isl_set_copy(stmt_domain_i);
    isl_set_free(stmt_domain_i);
    isl_space_free(space_i);
    isl_id_free(id_i);
  }
  isl_set_list_free(stmt_domain_list);
  isl_id_free(id);

  data->stmt_data = stmt_data;

  /* Extract the schedule.  Reversing the prefix schedule relation maps
   * schedule (time) dimensions back to the statement iterators. */
  isl_map *sched = isl_map_from_union_map(isl_schedule_node_get_prefix_schedule_relation(node));
  sched = isl_map_reverse(sched);
  isl_pw_multi_aff *iterator_map = isl_pw_multi_aff_from_map(sched);
  data->stmt_data->iterator_map = iterator_map;

  extract_rar_URE(stmt, data);
  data->stmt_data = t2s_stmt_data_free(stmt_data);

  return node;
}

/* Create UREs for live-out accesses.
 * For a write access that is the source of a WAW dependence, the writeout
 * (drain) domain is the part of the statement's anchor domain not
 * overwritten later; a "<func>_drain = select(domain, <func>)" URE is
 * emitted for it. */
static int t2s_drain_URE_access(__isl_keep pet_expr *expr, void *user)
{
  struct t2s_data *data = user;
  struct t2s_stmt_data *stmt_data = data->stmt_data;
  isl_multi_pw_aff *index;
  isl_id *id;

  id = isl_id_copy(expr->acc.ref_id);
  index = isl_multi_pw_aff_copy(expr->acc.index);

  struct polysa_dep *dep;
  int n;
  isl_ctx *ctx = data->ctx;
  isl_set *writeout_domain = isl_set_copy(stmt_data->stmt_anchor_domain);
  int is_drain = 0;
  /* Subtract the source domains of all WAW deps rooted at this
   * reference: what remains is the last-write (drain) region. */
  for (n = 0; n < data->ndeps; n++) {
    dep = data->deps[n];
    if (dep->src == id && dep->type == POLYSA_DEP_WAW) {
      isl_set *dep_src_domain = isl_set_copy(dep->src_sched_domain);
      /* Generate the writeout domain */
      writeout_domain = isl_set_subtract(writeout_domain, dep_src_domain);
      is_drain = 1;
    }
  }

  if (isl_set_is_empty(writeout_domain) || !is_drain) {
    /* Not a live-out access: nothing to drain. */
    isl_set_free(writeout_domain);
    isl_id_free(id);
    isl_multi_pw_aff_free(index);
    return 0;
  } else {
    /* Simplify the writeout domain w.r.t. the anchor domain. */
    isl_set *anchor_domain = isl_set_copy(data->anchor_domain);
    anchor_domain = isl_set_set_tuple_name(anchor_domain, isl_set_get_tuple_name(writeout_domain));
    writeout_domain = isl_set_gist(writeout_domain, anchor_domain);
    /* Peel off the scalar dimensions. */
    writeout_domain = t2s_peel_off_scalar_dims(writeout_domain, data->schedule);
    /* Set up the iterator names. */
    writeout_domain = t2s_set_set_iters(writeout_domain);
    char *writeout_domain_str = isl_set_to_t2s_format(writeout_domain);
    isl_set_free(writeout_domain);

    /* Generate the func name. */
    isl_id *func = isl_id_to_id_get(data->ref2func, isl_id_copy(id));
    isl_printer *p_str = isl_printer_to_str(ctx);
    p_str = isl_printer_print_id(p_str, func);
    p_str = isl_printer_print_str(p_str, "_drain");
    char *drain_func_name = isl_printer_get_str(p_str);
    isl_printer_free(p_str);

    /* Build "<func>(c0, ..., cN)". */
    p_str = isl_printer_to_str(ctx);
    p_str = isl_printer_print_id(p_str, func);
    p_str = isl_printer_print_str(p_str, "(");
    for (int i = 0; i < data->iter_num; i++) {
      if (i > 0) {
        p_str = isl_printer_print_str(p_str, ", ");
      }
      char iter_name[100];
      sprintf(iter_name, "c%d", i);
      p_str = isl_printer_print_str(p_str, iter_name);
    }
    p_str = isl_printer_print_str(p_str, ")");
    char *func_str = isl_printer_get_str(p_str);
    isl_printer_free(p_str);

    /* Build "<func>_drain(c0, ..., cN)". */
    p_str = isl_printer_to_str(ctx);
    p_str = isl_printer_print_id(p_str, func);
    p_str = isl_printer_print_str(p_str, "_drain(");
    for (int i = 0; i < data->iter_num; i++) {
      if (i > 0) {
        p_str = isl_printer_print_str(p_str, ", ");
      }
      char iter_name[100];
      sprintf(iter_name, "c%d", i);
      p_str = isl_printer_print_str(p_str, iter_name);
    }
    p_str = isl_printer_print_str(p_str, ")");
    char *drain_func_str = isl_printer_get_str(p_str);
    isl_printer_free(p_str);

    // /* Generate the transformed index. */
    // isl_multi_pw_aff *trans_index = isl_multi_pw_aff_copy(index);
    // trans_index = isl_multi_pw_aff_pullback_pw_multi_aff(trans_index, isl_pw_multi_aff_copy(stmt_data->iterator_map));
    //
    // /* Set up the iterator names. */
    // trans_index = t2s_set_multi_pw_aff_iters(trans_index);
    // char *acc_str = array_acc_from_multi_pw_aff(trans_index);

    /* Generate the URE:
     *   <func>_drain(...) = select(<writeout_domain>, <func>(...)); */
    // p_str = isl_printer_to_str(ctx);
    // p_str = isl_printer_print_str(p_str, drain_func_str);
    // p_str = isl_printer_print_str(p_str, " = 0;\n");
    // char *URE_text = isl_printer_get_str(p_str);
    // isl_printer_free(p_str);
    // data->URE = (struct t2s_URE **)realloc(data->URE, sizeof(struct t2s_URE *) * (data->URE_num + 1));
    // data->URE[data->URE_num] = create_t2s_URE(data->URE, data->URE_num, strdup(drain_func_name), URE_text, 1, ctx);
    // data->URE_num++;

    p_str = isl_printer_to_str(ctx);
    p_str = isl_printer_print_str(p_str, drain_func_str);
    p_str = isl_printer_print_str(p_str, " = ");
    p_str = isl_printer_print_str(p_str, "select(");
    p_str = isl_printer_print_str(p_str, writeout_domain_str);
    p_str = isl_printer_print_str(p_str, ", ");
    p_str = isl_printer_print_str(p_str, func_str);
    // p_str = isl_printer_print_str(p_str, ", ");
    // p_str = isl_printer_print_str(p_str, drain_func_str);
    p_str = isl_printer_print_str(p_str, ");\n");
    char *URE_text = isl_printer_get_str(p_str);
    isl_printer_free(p_str);

    // data->t2s_stmt_text = (char **)realloc(data->t2s_stmt_text, sizeof(char *) * (data->t2s_stmt_num + 1));
    // data->t2s_stmt_text[data->t2s_stmt_num] = URE_text;
    // data->t2s_stmt_num++;

    /* Append the URE.  NOTE(review): the realloc result is not checked;
     * an OOM here would crash — confirm this is acceptable project-wide. */
    data->URE = (struct t2s_URE **)realloc(data->URE, sizeof(struct t2s_URE *) * (data->URE_num + 1));
    data->URE[data->URE_num] = create_t2s_URE(data->URE, data->URE_num, strdup(drain_func_name), URE_text, 1, ctx);
    data->URE_num++;

    isl_id_free(func);
    free(drain_func_name);
    free(func_str);
    free(drain_func_str);
    free(writeout_domain_str);
    // free(acc_str);
  }

  isl_id_free(id);
  isl_multi_pw_aff_free(index);

  return 0;
}

/* Generate UREs for live-out accesses.
 * "stmt" is consumed; each access expression is visited by
 * t2s_drain_URE_access. */
static isl_stat extract_drain_URE(__isl_take struct ppcg_stmt *stmt, struct t2s_data *data)
{
  pet_tree_foreach_access_expr(stmt->stmt->body, &t2s_drain_URE_access, data);
  ppcg_stmt_free(stmt);
  return isl_stat_ok;
}

/* Generate the drain UREs for the intermediate variables in the user statement.
*/ static __isl_give isl_schedule_node *gen_drain_stmt_text(__isl_take isl_schedule_node *node, void *user) { struct t2s_data *data = user; struct t2s_stmt_data *stmt_data; struct ppcg_stmt *stmt; isl_set *domain; isl_space *space; isl_id *id; if (!node) return NULL; if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf) return node; /* Find the statement. */ stmt = isl_calloc_type(data->ctx, struct ppcg_stmt); domain = isl_set_from_union_set(isl_schedule_node_get_domain(node)); space = isl_set_get_space(domain); id = isl_space_get_tuple_id(space, isl_dim_set); stmt->stmt = find_stmt(data->scop, id); isl_space_free(space); isl_set_free(domain); stmt_data = isl_calloc_type(data->ctx, struct t2s_stmt_data); stmt_data->stmt_num = 0; stmt_data->stmts = NULL; stmt_data->stmt_domain = NULL; stmt_data->stmt_deps = NULL; stmt_data->n_acc_group = 0; stmt_data->n_dep_per_acc_group = 0; stmt_data->dep_stmt_pair = NULL; stmt_data->iterator_map = NULL; isl_set_list *stmt_domain_list = isl_union_set_get_set_list(data->stmt_domain); for (int i = 0; i < isl_union_set_n_set(data->stmt_domain); i++) { isl_set *stmt_domain_i = isl_set_list_get_set(stmt_domain_list, i); isl_space *space_i = isl_set_get_space(stmt_domain_i); isl_id *id_i = isl_space_get_tuple_id(space_i, isl_dim_set); if (id_i == id) { stmt_data->stmt_anchor_domain = isl_set_copy(stmt_domain_i); } isl_set_free(stmt_domain_i); isl_space_free(space_i); isl_id_free(id_i); } isl_set_list_free(stmt_domain_list); isl_id_free(id); data->stmt_data = stmt_data; /* Extract the schedule. */ isl_map *sched = isl_map_from_union_map(isl_schedule_node_get_prefix_schedule_relation(node)); sched = isl_map_reverse(sched); isl_pw_multi_aff *iterator_map = isl_pw_multi_aff_from_map(sched); data->stmt_data->iterator_map = iterator_map; extract_drain_URE(stmt, data); data->stmt_data = t2s_stmt_data_free(stmt_data); return node; } /* Print UREs in T2S code. 
*/
/* Emit all generated UREs to data->p.  The three bottom-up traversals
 * below respectively generate the reuse (RAR) UREs, the compute UREs and
 * the drain UREs, appending them to data->URE before printing. */
static __isl_give isl_schedule *gen_stmt_text_wrap(__isl_take isl_schedule *schedule, struct t2s_data *data)
{
  data->p = isl_printer_start_line(data->p);
  data->p = isl_printer_print_str(data->p, "// UREs");
  data->p = isl_printer_end_line(data->p);

  /* Generate the reuse (RAR) statement. */
  schedule = isl_schedule_map_schedule_node_bottom_up(schedule, &gen_op_stmt_text, data);

  /* Traverse each statement, build the ppcg_stmt struct and update
   * the ref2expr using T2S functions.
   * Print the stmt to data->t2s_stmt_text. */
  /* Option 1: Generate multiple UREs for one statement. */
  schedule = isl_schedule_map_schedule_node_bottom_up(schedule, &gen_stmt_text, data);
  // /* Option 2: Generate single URE for one statement. (Buggy)*/
  // schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
  //     &gen_stmt_text_single, data);

  /* Generate the drain statement. */
  schedule = isl_schedule_map_schedule_node_bottom_up(schedule, &gen_drain_stmt_text, data);

  // /* Print out the T2S stmt texts. */
  // for (int i = 0; i < data->t2s_stmt_num; i++) {
  //// data->p = isl_printer_start_line(data->p);
  //   data->p = isl_printer_print_str(data->p, data->t2s_stmt_text[i]);
  //// data->p = isl_printer_end_line(data->p);
  // }

  /* Print out the URE texts. */
  for (int i = 0; i < data->URE_num; i++) {
    data->p = isl_printer_print_str(data->p, data->URE[i]->text);
  }
  data->p = isl_printer_start_line(data->p);
  data->p = isl_printer_end_line(data->p);

  return schedule;
}

/* Extract the detailed information of iterators in the code, including:
 * - iterator name
 * - lower bound
 * - upper bound
 * - stride
 * Fills data->iter with data->iter_num entries. */
static isl_stat extract_iters(__isl_keep isl_schedule *schedule, struct t2s_data *data)
{
  isl_ctx *ctx;
  isl_set *domain = isl_set_copy(data->anchor_domain);
  int iter_num = data->iter_num;
  ctx = isl_set_get_ctx(domain);

  // // debug
  // isl_printer *p = isl_printer_to_file(ctx, stdout);
  // p = isl_printer_print_set(p, domain);
  // printf("\n");
  // // debug

  /* Peel off the scalar dimensions. */
  domain = t2s_peel_off_scalar_dims(domain, schedule);

  // // debug
  //// isl_printer *p = isl_printer_to_file(ctx, stdout);
  // p = isl_printer_print_set(p, domain);
  // printf("\n");
  // // debug

  data->iter = (struct polysa_iter **)malloc(sizeof(struct polysa_iter *) * iter_num);

  /* Compute a fixed-size bounding box of the domain to derive per-dim
   * offsets (lower bounds) and sizes. */
  isl_map *domain_map = isl_map_from_range(isl_set_copy(domain));
  isl_fixed_box *box = isl_map_get_range_simple_fixed_box_hull(domain_map);
  isl_multi_aff *offset;
  isl_multi_val *size;
  isl_map_free(domain_map);
  if (isl_fixed_box_is_valid(box)) {
    offset = isl_fixed_box_get_offset(box);
    size = isl_fixed_box_get_size(box);
    // // debug
    // p = isl_printer_print_multi_aff(p, offset);
    // printf("\n");
    // p = isl_printer_print_multi_val(p, size);
    // printf("\n");
    // // debug
  }
  for (int i = 0; i < data->iter_num; i++) {
    struct polysa_iter *iter = (struct polysa_iter *)malloc(sizeof(struct polysa_iter));
    /* Stride. */
    isl_stride_info *si;
    si = isl_set_get_stride_info(domain, i);
    isl_val *s = isl_stride_info_get_stride(si);
    // // debug
    // p = isl_printer_print_val(p, s);
    // printf("\n");
    // // debug
    iter->stride = isl_val_get_num_si(s);
    isl_val_free(s);
    isl_stride_info_free(si);
    /* Name.  "c<i>" is the generic name; the T2S name distinguishes
     * space loops ("sloop") from time loops ("tloop") depending on the
     * array type. */
    char iter_name[100];
    sprintf(iter_name, "c%d", i);
    iter->name = strdup(iter_name);
    /* NOTE(review): if prog->type is neither ASYNC nor SYNC,
     * ts_iter_name is left uninitialized before strdup — confirm those
     * are the only possible types. */
    char ts_iter_name[100];
    if (data->prog->type == POLYSA_SA_TYPE_ASYNC) {
      if (i < data->prog->space_w) {
        sprintf(ts_iter_name, "sloop%d", i);
      } else {
        sprintf(ts_iter_name, "tloop%d", i - data->prog->space_w);
      }
    } else if (data->prog->type == POLYSA_SA_TYPE_SYNC) {
      if (i < data->prog->time_w) {
        sprintf(ts_iter_name, "tloop%d", i);
      } else {
        sprintf(ts_iter_name, "sloop%d", i - data->prog->time_w);
      }
    }
    iter->ts_name = strdup(ts_iter_name);
    /* Bounds: lb = offset, ub = offset + size - 1.
     * NOTE(review): when the fixed box is invalid, iter->lb/iter->ub are
     * left uninitialized (malloc'ed memory) — confirm downstream guards. */
    if (isl_fixed_box_is_valid(box)) {
      isl_aff *offset_i = isl_multi_aff_get_aff(offset, i);
      iter->lb = isl_aff_copy(offset_i);
      // // debug
      // p = isl_printer_print_aff(p, offset_i);
      // printf("\n");
      // p = isl_printer_set_output_format(p, ISL_FORMAT_C);
      // p = isl_printer_print_aff(p, offset_i);
      // printf("\n");
      // // debug
      isl_val *size_i = isl_multi_val_get_val(size, i);
      offset_i = isl_aff_add_constant_val(offset_i, size_i);
      offset_i = isl_aff_add_constant_si(offset_i, -1);
      iter->ub = offset_i;
    }
    data->iter[i] = iter;
  }
  if (isl_fixed_box_is_valid(box)) {
    isl_multi_aff_free(offset);
    isl_multi_val_free(size);
  }
  isl_fixed_box_free(box);
  isl_set_free(domain);

  return isl_stat_ok;
}

/* Extract the dependence (RAW, RAR, WAW) from the program. */
static __isl_give isl_schedule *extract_deps(__isl_take isl_schedule *schedule, struct t2s_data *data)
{
  isl_schedule_node *band;
  isl_union_map *dep_flow;
  isl_union_map *dep_rar;
  isl_union_map *dep_waw;
  isl_union_map *dep_total;
  isl_basic_map_list *deps;
  int ndeps;
  isl_basic_map *dep_i;
  struct polysa_dep *p_dep_i;
  isl_vec *disvec;

  // // debug
  // isl_printer *p = isl_printer_to_file(data->ctx, stdout);
  // p = isl_printer_set_yaml_style(p, ISL_YAML_STYLE_BLOCK);
  // p = isl_printer_print_schedule(p, schedule);
  // printf("\n");
  // // debug

  /* Pick the band w.r.t. which dependence distances are measured. */
  if (data->scop->options->t2s_tile && data->scop->options->t2s_tile_phase == 1) {
    band = isl_schedule_get_root(schedule);
    band = isl_schedule_node_child(band, 0);
  } else {
    band = get_outermost_permutable_node(schedule);
  }

  // // debug
  // p = isl_printer_print_schedule_node(p, band);
  // printf("\n");
  // // debug

  dep_flow = data->scop->tagged_dep_flow;
  dep_rar = data->scop->tagged_dep_rar;
  dep_waw = data->scop->tagged_dep_waw;

  /* Add RAW deps.
*/
  deps = isl_union_map_get_basic_map_list(dep_flow);
  ndeps = isl_union_map_n_basic_map(dep_flow);
  data->ndeps = ndeps;
  /* NOTE(review): malloc/realloc results below are not checked. */
  data->deps = (struct polysa_dep **)malloc(data->ndeps * sizeof(struct polysa_dep *));
  for (int i = 0; i < ndeps; i++) {
    p_dep_i = (struct polysa_dep *)malloc(sizeof(struct polysa_dep));
    dep_i = isl_basic_map_list_get_basic_map(deps, i);
    p_dep_i->isl_dep = isl_basic_map_copy(dep_i);
    /* Drop the access tags and compute the dependence distance vector
     * under the current schedule. */
    isl_map *untagged_dep_i = isl_map_factor_domain(isl_map_from_basic_map(isl_basic_map_copy(dep_i)));
    isl_basic_map *bmap_dep_i = isl_basic_map_from_map(untagged_dep_i);
    disvec = get_dep_dis_at_schedule(bmap_dep_i, schedule);
    /* The generated dependence distance vector contains the scalar dim,
     * we will need to peel them off. */
    disvec = t2s_peel_off_scalar_dims_vec(disvec, schedule);
    // // debug
    // isl_printer *p = isl_printer_to_file(data->ctx, stdout);
    // p = isl_printer_print_basic_map(p, p_dep_i->isl_dep);
    // printf("\n");
    // p = isl_printer_print_vec(p, disvec);
    // printf("\n");
    // isl_printer_free(p);
    // // debug
    isl_basic_map_free(bmap_dep_i);
    /* Pull the (tagged) source/destination statement ids out of the
     * dependence space. */
    isl_space *space = isl_basic_map_get_space(dep_i);
    isl_space *src_space = isl_space_unwrap(isl_space_domain(isl_space_copy(space)));
    isl_space *dest_space = isl_space_unwrap(isl_space_range(space));
    isl_id *src_id = isl_space_get_tuple_id(src_space, isl_dim_out);
    isl_id *dest_id = isl_space_get_tuple_id(dest_space, isl_dim_out);
    isl_space_free(src_space);
    isl_space_free(dest_space);
    /* Map the source/destination domains into the schedule space. */
    untagged_dep_i = isl_map_factor_domain(isl_map_from_basic_map(dep_i));
    isl_set *src_domain = isl_map_domain(isl_map_copy(untagged_dep_i));
    isl_set *dest_domain = isl_map_range(untagged_dep_i);
    isl_union_map *sched = isl_schedule_node_get_subtree_schedule_union_map(band);
    isl_union_map *sched_src = isl_union_map_intersect_domain(isl_union_map_copy(sched),
        isl_union_set_from_set(isl_set_copy(src_domain)));
    isl_union_map *sched_dest = isl_union_map_intersect_domain(sched,
        isl_union_set_from_set(isl_set_copy(dest_domain)));
    p_dep_i->src_sched_domain = isl_set_from_union_set(isl_union_map_range(sched_src));
    p_dep_i->dest_sched_domain = isl_set_from_union_set(isl_union_map_range(sched_dest));
    /* Add the tuple name */
    p_dep_i->src_sched_domain = isl_set_set_tuple_name(p_dep_i->src_sched_domain,
        isl_set_get_tuple_name(src_domain));
    p_dep_i->dest_sched_domain = isl_set_set_tuple_name(p_dep_i->dest_sched_domain,
        isl_set_get_tuple_name(dest_domain));
    isl_set_free(src_domain);
    isl_set_free(dest_domain);
    p_dep_i->src = src_id;
    p_dep_i->dest = dest_id;
    p_dep_i->disvec = disvec;
    p_dep_i->type = POLYSA_DEP_RAW;
    data->deps[i] = p_dep_i;
  }
  isl_basic_map_list_free(deps);

  /* Add RAR deps. */
  deps = isl_union_map_get_basic_map_list(dep_rar);
  ndeps = isl_union_map_n_basic_map(dep_rar);
  data->deps = (struct polysa_dep **)realloc(data->deps,
      (data->ndeps + ndeps) * sizeof(struct polysa_dep *));
  for (int i = 0; i < ndeps; i++) {
    p_dep_i = (struct polysa_dep *)malloc(sizeof(struct polysa_dep));
    dep_i = isl_basic_map_list_get_basic_map(deps, i);
    p_dep_i->isl_dep = isl_basic_map_copy(dep_i);
    isl_map *untagged_dep_i = isl_map_factor_domain(isl_map_from_basic_map(isl_basic_map_copy(dep_i)));
    isl_basic_map *bmap_dep_i = isl_basic_map_from_map(untagged_dep_i);
    disvec = get_dep_dis_at_schedule(bmap_dep_i, schedule);
    /* The generated dependence distance vector contains the scalar dim,
     * we will need to peel them off. */
    disvec = t2s_peel_off_scalar_dims_vec(disvec, schedule);
    isl_basic_map_free(bmap_dep_i);
    isl_space *space = isl_basic_map_get_space(dep_i);
    isl_space *src_space = isl_space_unwrap(isl_space_domain(isl_space_copy(space)));
    isl_space *dest_space = isl_space_unwrap(isl_space_range(space));
    isl_id *src_id = isl_space_get_tuple_id(src_space, isl_dim_out);
    isl_id *dest_id = isl_space_get_tuple_id(dest_space, isl_dim_out);
    isl_space_free(src_space);
    isl_space_free(dest_space);
    untagged_dep_i = isl_map_factor_domain(isl_map_from_basic_map(dep_i));
    isl_set *src_domain = isl_map_domain(isl_map_copy(untagged_dep_i));
    isl_set *dest_domain = isl_map_range(untagged_dep_i);
    // // debug
    // isl_printer *p = isl_printer_to_file(data->ctx, stdout);
    // p = isl_printer_print_set(p, src_domain);
    // printf("\n");
    // // debug
    isl_union_map *sched = isl_schedule_node_get_subtree_schedule_union_map(band);
    // // debug
    // p = isl_printer_print_union_map(p, sched);
    // printf("\n");
    // // debug
    isl_union_map *sched_src = isl_union_map_intersect_domain(isl_union_map_copy(sched),
        isl_union_set_from_set(isl_set_copy(src_domain)));
    isl_union_map *sched_dest = isl_union_map_intersect_domain(sched,
        isl_union_set_from_set(isl_set_copy(dest_domain)));
    p_dep_i->src_sched_domain = isl_set_from_union_set(isl_union_map_range(sched_src));
    p_dep_i->dest_sched_domain = isl_set_from_union_set(isl_union_map_range(sched_dest));
    // // debug
    // p = isl_printer_print_set(p, p_dep_i->src_sched_domain);
    // printf("\n");
    // // debug
    /* Add the tuple name */
    p_dep_i->src_sched_domain = isl_set_set_tuple_name(p_dep_i->src_sched_domain,
        isl_set_get_tuple_name(src_domain));
    p_dep_i->dest_sched_domain = isl_set_set_tuple_name(p_dep_i->dest_sched_domain,
        isl_set_get_tuple_name(dest_domain));
    isl_set_free(src_domain);
    isl_set_free(dest_domain);
    p_dep_i->src = src_id;
    p_dep_i->dest = dest_id;
    p_dep_i->disvec = disvec;
    p_dep_i->type = POLYSA_DEP_RAR;
    data->deps[i + data->ndeps] = p_dep_i;
  }
  data->ndeps += ndeps;
  isl_basic_map_list_free(deps);

  /* Add WAW deps. */
  deps = isl_union_map_get_basic_map_list(dep_waw);
  ndeps = isl_union_map_n_basic_map(dep_waw);
  data->deps = (struct polysa_dep **)realloc(data->deps,
      (data->ndeps + ndeps) * sizeof(struct polysa_dep *));
  for (int i = 0; i < ndeps; i++) {
    p_dep_i = (struct polysa_dep *)malloc(sizeof(struct polysa_dep));
    dep_i = isl_basic_map_list_get_basic_map(deps, i);
    p_dep_i->isl_dep = isl_basic_map_copy(dep_i);
    isl_map *untagged_dep_i = isl_map_factor_domain(isl_map_from_basic_map(isl_basic_map_copy(dep_i)));
    isl_basic_map *bmap_dep_i = isl_basic_map_from_map(untagged_dep_i);
    disvec = get_dep_dis_at_schedule(bmap_dep_i, schedule);
    /* The generated dependence distance vector contains the scalar dim,
     * we will need to peel them off. */
    disvec = t2s_peel_off_scalar_dims_vec(disvec, schedule);
    isl_basic_map_free(bmap_dep_i);
    isl_space *space = isl_basic_map_get_space(dep_i);
    isl_space *src_space = isl_space_unwrap(isl_space_domain(isl_space_copy(space)));
    isl_space *dest_space = isl_space_unwrap(isl_space_range(space));
    isl_id *src_id = isl_space_get_tuple_id(src_space, isl_dim_out);
    isl_id *dest_id = isl_space_get_tuple_id(dest_space, isl_dim_out);
    isl_space_free(src_space);
    isl_space_free(dest_space);
    untagged_dep_i = isl_map_factor_domain(isl_map_from_basic_map(dep_i));
    isl_set *src_domain = isl_map_domain(isl_map_copy(untagged_dep_i));
    isl_set *dest_domain = isl_map_range(untagged_dep_i);
    isl_union_map *sched = isl_schedule_node_get_subtree_schedule_union_map(band);
    isl_union_map *sched_src = isl_union_map_intersect_domain(isl_union_map_copy(sched),
        isl_union_set_from_set(isl_set_copy(src_domain)));
    isl_union_map *sched_dest = isl_union_map_intersect_domain(sched,
        isl_union_set_from_set(isl_set_copy(dest_domain)));
    p_dep_i->src_sched_domain = isl_set_from_union_set(isl_union_map_range(sched_src));
    p_dep_i->dest_sched_domain = isl_set_from_union_set(isl_union_map_range(sched_dest));
    /* Add the tuple name */
    p_dep_i->src_sched_domain = isl_set_set_tuple_name(p_dep_i->src_sched_domain,
        isl_set_get_tuple_name(src_domain));
    p_dep_i->dest_sched_domain = isl_set_set_tuple_name(p_dep_i->dest_sched_domain,
        isl_set_get_tuple_name(dest_domain));
    isl_set_free(src_domain);
    isl_set_free(dest_domain);
    p_dep_i->src = src_id;
    p_dep_i->dest = dest_id;
    p_dep_i->disvec = disvec;
    p_dep_i->type = POLYSA_DEP_WAW;
    data->deps[i + data->ndeps] = p_dep_i;
  }
  data->ndeps += ndeps;
  isl_basic_map_list_free(deps);

  isl_schedule_node_free(band);
  return schedule;
}

/* Free a t2s_URE and its owned strings.  Always returns NULL. */
static __isl_null struct t2s_URE *t2s_URE_free(__isl_take struct t2s_URE *u)
{
  if (!u)
    return NULL;
  free(u->name);
  free(u->text);
  free(u);
  return NULL;
}

/* Free a t2s_data together with everything it owns.
 * Always returns NULL. */
__isl_null struct t2s_data *t2s_data_free(__isl_take struct t2s_data *d)
{
  if (!d)
    return NULL;
  isl_set_free(d->anchor_domain);
  isl_union_set_free(d->stmt_domain);
  isl_union_set_free(d->stmt_sim_domain);
  for (int i = 0; i < d->t2s_stmt_num; i++) {
    free(d->t2s_stmt_text[i]);
  }
  free(d->t2s_stmt_text);
  for (int i = 0; i < d->URE_num; i++) {
    t2s_URE_free(d->URE[i]);
  }
  free(d->URE);
  for (int i = 0; i < d->iter_num; i++) {
    polysa_iter_free(d->iter[i]);
  }
  free(d->iter);
  isl_printer_free(d->p);
  for (int i = 0; i < d->ndeps; i++) {
    polysa_dep_free(d->deps[i]);
  }
  free(d->deps);
  t2s_stmt_data_free(d->stmt_data);
  isl_id_to_id_free(d->ref2func);
  for (int i = 0; i < d->n_array; i++) {
    struct t2s_array_info *array = &d->array[i];
    for (int j = 0; j < array->n_group; j++) {
      t2s_array_ref_group_free(array->groups[j]);
    }
    free(array->groups);
  }
  free(d->array);
  t2s_group_data_free(d->group_data);
  isl_id_list_free(d->func_ids);
  free(d);
  return NULL;
}

/* Generate T2S headers.
*/ static isl_stat gen_t2s_headers(struct t2s_data *data) { isl_printer *p = data->p; p = isl_printer_start_line(p); p = isl_printer_print_str(p, "#include \"Halide.h\""); p = isl_printer_end_line(p); p = isl_printer_print_str(p, "#include <iostream>"); p = isl_printer_end_line(p); p = isl_printer_start_line(p); p = isl_printer_end_line(p); p = isl_printer_start_line(p); p = isl_printer_print_str(p, "using namespace Halide;"); p = isl_printer_end_line(p); p = isl_printer_start_line(p); p = isl_printer_print_str(p, "using namespace std;\n"); p = isl_printer_end_line(p); p = isl_printer_start_line(p); p = isl_printer_end_line(p); return isl_stat_ok; } /* Generate T2S inputs. */ static isl_stat gen_t2s_inputs(struct t2s_data *data) { isl_printer *p = data->p; p = isl_printer_start_line(p); p = isl_printer_print_str(p, "// Inputs (Fill in manually)"); p = isl_printer_end_line(p); p = isl_printer_start_line(p); p = isl_printer_end_line(p); } /* Generate T2S variable declarations. */ static isl_stat gen_t2s_vars(struct t2s_data *data) { isl_printer *p = data->p; p = isl_printer_start_line(p); p = isl_printer_print_str(p, "// Variable declarations"); p = isl_printer_end_line(p); p = isl_printer_print_str(p, "Var "); for (int i = 0; i < data->iter_num; i++) { char iter_str[100]; sprintf(iter_str, "c%d", i); if (i > 0) { p = isl_printer_print_str(p, ", "); } p = isl_printer_print_str(p, iter_str); } p = isl_printer_print_str(p, ";"); p = isl_printer_end_line(p); p = isl_printer_start_line(p); p = isl_printer_end_line(p); return isl_stat_ok; } /* Fill up the group arrays with singleton groups, i.e., one group * per reference, initializing the array, access, write, n_ref and refs fields. * In particular the access field is initialized to the scheduled access relation * of the array references. * * Return the number of elements initialized, i.e., the number of * active references in the current kernel. 
*/
static int t2s_populate_array_references(struct t2s_array_info *info,
  struct t2s_array_ref_group **groups, struct t2s_data *data)
{
  isl_ctx *ctx = data->ctx;
  int n = 0;

  for (int i = 0; i < info->array->n_ref; i++) {
    isl_union_map *umap;
    isl_map *map;
    struct t2s_array_ref_group *group;
    struct gpu_stmt_access *access = info->array->refs[i];

    /* Schedule the access relation; references that become empty under
     * the schedule are skipped. */
    map = isl_map_copy(access->access);
    umap = isl_union_map_from_map(map);
    umap = isl_union_map_apply_domain(umap, isl_union_map_copy(data->group_data->full_sched));
    if (isl_union_map_is_empty(umap)) {
      isl_union_map_free(umap);
      continue;
    }
    map = isl_map_from_union_map(umap);
    map = isl_map_detect_equalities(map);

    group = isl_calloc_type(ctx, struct t2s_array_ref_group);
    if (!group) {
      isl_map_free(map);
      return -1;
    }
    group->t2s_array = info;
    group->array = info->array;
    group->access = map;
    group->write = access->write;
    group->exact_write = access->exact_write;
    /* Singleton group: points directly into the array's refs array. */
    group->refs = &info->array->refs[i];
    group->n_ref = 1;
    groups[n++] = group;
  }

  return n;
}

/* Combine two groups into a fresh group: union of the access relations,
 * OR of the write flags, AND of the exact_write flags, and the
 * concatenation of the reference lists.  Returns NULL on allocation
 * failure or NULL inputs. */
static struct t2s_array_ref_group *join_groups(
  struct t2s_array_ref_group *group1, struct t2s_array_ref_group *group2)
{
  isl_ctx *ctx;
  struct t2s_array_ref_group *group;

  if (!group1 || !group2)
    return NULL;

  ctx = isl_map_get_ctx(group1->access);
  group = isl_calloc_type(ctx, struct t2s_array_ref_group);
  if (!group)
    return NULL;
  group->t2s_array = group1->t2s_array;
  group->array = group1->array;
  group->access = isl_map_union(isl_map_copy(group1->access), isl_map_copy(group2->access));
  group->write = group1->write || group2->write;
  group->exact_write = group1->exact_write && group2->exact_write;
  group->n_ref = group1->n_ref + group2->n_ref;
  group->refs = isl_alloc_array(ctx, struct gpu_stmt_access *, group->n_ref);
  if (!group->refs)
    return t2s_array_ref_group_free(group);
  for (int i = 0; i < group1->n_ref; i++)
    group->refs[i] = group1->refs[i];
  for (int i = 0; i < group2->n_ref; i++)
    group->refs[group1->n_ref + i] = group2->refs[i];

  return group;
}

/* Combine the given two groups into a single group and free
 * the original two groups. */
static struct t2s_array_ref_group *join_groups_and_free(
  struct t2s_array_ref_group *group1, struct t2s_array_ref_group *group2)
{
  struct t2s_array_ref_group *group;

  group = join_groups(group1, group2);
  t2s_array_ref_group_free(group1);
  t2s_array_ref_group_free(group2);
  return group;
}

/* Group two writes if their live-in ranges overlap at the current iteration.
 * Repeatedly merges groups[i] with any later group that "overlap" reports
 * as conflicting; i only advances once no merge happened for it.  Returns
 * the new number of groups, or -1 on allocation failure. */
static int t2s_group_writes(int n, struct t2s_array_ref_group **groups,
  int (*overlap)(struct t2s_array_ref_group *group1, struct t2s_array_ref_group *group2, void *user),
  struct t2s_data *data)
{
  int i, j;
  int any_merge;

  for (i = 0; i < n; i += !any_merge) {
    any_merge = 0;
    for (j = n - 1; j > i; j--) {
      if (!overlap(groups[i], groups[j], data))
        continue;
      any_merge = 1;
      groups[i] = join_groups_and_free(groups[i], groups[j]);
      /* Compact the array by moving the last group into the hole. */
      if (j != n - 1)
        groups[j] = groups[n - 1];
      groups[n - 1] = NULL;
      n--;
      if (!groups[i])
        return -1;
    }
  }

  return n;
}

/* For each dependence, if the dependence distances are all zero for the
 * members of the schedule band, compute the live-range from the src to the
 * dest of the dependence.
 * Otherwise, compute the live-range without considering the dest of the dependence.
*/
static __isl_give isl_set *t2s_compute_dep_live_range(struct polysa_dep *d, struct t2s_data *data)
{
  isl_basic_map *bmap;
  isl_basic_set *bset;
  isl_map *map;
  isl_set *set;
  isl_set *src_set;
  isl_set *dest_set;
  isl_map *lex_map;
  isl_union_map *sched;
  isl_set *live_range;

  /* Points lexicographically at or after the scheduled src domain
   * (equated on the first iter_num dimensions). */
  bmap = isl_basic_map_copy(d->isl_dep);
  map = isl_map_factor_domain(isl_map_from_basic_map(bmap));
  set = isl_map_domain(map);
  sched = isl_union_map_copy(data->group_data->full_sched);
  sched = isl_union_map_intersect_domain(sched, isl_union_set_from_set(set));
  set = isl_map_range(isl_map_from_union_map(sched));
  lex_map = isl_map_lex_le(isl_set_get_space(set));
  for (int i = 0; i < data->iter_num; i++) {
    lex_map = isl_map_equate(lex_map, isl_dim_in, i, isl_dim_out, i);
  }
  src_set = isl_set_apply(set, lex_map);

  if (isl_vec_is_zero(d->disvec)) {
    /* Zero-distance dep: also bound the range from above by the points
     * strictly before the scheduled dest domain. */
    bmap = isl_basic_map_copy(d->isl_dep);
    map = isl_map_factor_domain(isl_map_from_basic_map(bmap));
    set = isl_map_range(map);
    sched = isl_union_map_copy(data->group_data->full_sched);
    sched = isl_union_map_intersect_domain(sched, isl_union_set_from_set(set));
    set = isl_map_range(isl_map_from_union_map(sched));
    lex_map = isl_map_lex_gt(isl_set_get_space(set));
    for (int i = 0; i < data->iter_num; i++) {
      lex_map = isl_map_equate(lex_map, isl_dim_in, i, isl_dim_out, i);
    }
    dest_set = isl_set_apply(set, lex_map);
    live_range = isl_set_intersect(src_set, dest_set);
  } else {
    live_range = src_set;
  }

  return live_range;
}

/* Return 1 if the live-ranges of the two dependences intersect, 0 otherwise.
 * NOTE(review): the "wr" parameter is currently unused — confirm whether
 * read/write live-ranges were meant to be computed differently. */
static int accesses_overlap(struct polysa_dep *d1, struct polysa_dep *d2, int wr, struct t2s_data *data)
{
  isl_set *live_range1;
  isl_set *live_range2;
  int r;

  live_range1 = t2s_compute_dep_live_range(d1, data);
  live_range2 = t2s_compute_dep_live_range(d2, data);
  if (isl_set_is_disjoint(live_range1, live_range2)) {
    r = 0;
  } else {
    r = 1;
  }
  isl_set_free(live_range1);
  isl_set_free(live_range2);

  return r;
}

/* If both accesses are write accesses with a RAW dep (intermediate access),
 * we check:
 * 1) whether both writes are local to the permutable band, i.e., whether
 *    they are assigned the same value by the members of the band;
 * 2) if the first condition holds, whether the (RAW) live-ranges of the two
 *    accesses overlap.
 * If both conditions hold, the two write accesses need to be assigned
 * different T2S function names to avoid overwriting each other's value.
 * We return 1 and later group them into the same array reference group.
 *
 * If both accesses are read accesses with a RAR dep (intermediate access),
 * we check:
 * 1) whether both reads are local to the permutable band, i.e., whether
 *    they are assigned the same value by the members of the band;
 * 2) if the first condition holds, whether the (RAR) live-ranges of the two
 *    accesses overlap.
 * If both conditions hold, the two read accesses need to be assigned
 * different T2S function names to avoid overwriting each other's value.
 * We return 1 and later group them into the same array reference group.
 *
 * For live-out accesses (drain accesses), we would need to assign them
 * different function names if they are updated in the same iteration.
 * Currently, we don't handle this scenario as there is only one statement
 * that involves the live-out accesses for all of the test cases. */
static int accesses_overlap_wrap(struct t2s_array_ref_group *group1,
  struct t2s_array_ref_group *group2, void *user)
{
  struct t2s_data *data = user;

  // // debug
  // isl_printer *p = isl_printer_to_file(isl_map_get_ctx(group1->access), stdout);
  // p = isl_printer_print_map(p, group1->access);
  // printf("\n");
  // p = isl_printer_print_map(p, group2->access);
  // printf("\n");
  // isl_printer_free(p);
  // // debug

  for (int i = 0; i < group1->n_ref; i++) {
    for (int j = 0; j < group2->n_ref; j++) {
      struct gpu_stmt_access *ref1 = group1->refs[i];
      struct gpu_stmt_access *ref2 = group2->refs[j];
      if (ref1->write == 1 && ref2->write == 1) {
        /* Write/write: compare the live-ranges of the first RAW dep
         * rooted at each reference. */
        for (int n = 0; n < data->ndeps; n++) {
          struct polysa_dep *dep1 = data->deps[n];
          if (dep1->type == POLYSA_DEP_RAW && dep1->src == ref1->ref_id) {
            for (int m = 0; m < data->ndeps; m++) {
              struct polysa_dep *dep2 = data->deps[m];
              if (dep2->type == POLYSA_DEP_RAW && dep2->src == ref2->ref_id) {
                /* Examine if two write accesses are overlapped. */
                return accesses_overlap(dep1, dep2, 0, data);
              }
            }
          }
        }
      } else if (ref1->read == 1 && ref2->read == 1) {
        /* Read/read: same check against RAR deps. */
        for (int n = 0; n < data->ndeps; n++) {
          struct polysa_dep *dep1 = data->deps[n];
          if (dep1->type == POLYSA_DEP_RAR && dep1->src == ref1->ref_id) {
            for (int m = 0; m < data->ndeps; m++) {
              struct polysa_dep *dep2 = data->deps[m];
              if (dep2->type == POLYSA_DEP_RAR && dep2->src == ref2->ref_id) {
                /* Examine if two read accesses are overlapped. */
                return accesses_overlap(dep1, dep2, 1, data);
              }
            }
          }
        }
      } else {
        /* Mixed read/write pair: never grouped. */
        return 0;
      }
    }
  }

  return 0;
}

/* Group array references whose live-ranges overlap, using
 * accesses_overlap_wrap as the merge criterion. */
static int t2s_group_overlapping_writes(int n, struct t2s_array_ref_group **groups, struct t2s_data *data)
{
  return t2s_group_writes(n, groups, &accesses_overlap_wrap, data);
}

/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group.
*/
static void t2s_set_array_groups(struct t2s_array_info *array, int n, struct t2s_array_ref_group **groups)
{
  int i;

  array->n_group = n;
  array->groups = groups;

  for (i = 0; i < n; i++) {
    groups[i]->nr = i;
  }
}

/* Assign a T2S function name to every access reference of "array".
 *
 * Within a group, reference r gets "<name>" (r == 0) or "<name>_<r>";
 * the mapping reference-id -> function-id is recorded in data->ref2func.
 * One function declaration id per distinct suffix (up to the largest
 * group size) is appended to data->func_ids, plus a "<name>[_r]_drain"
 * id when any group of this array contains a write.
 */
static void t2s_assign_array_group_func_id(struct t2s_data *data, struct t2s_array_info *array)
{
  int max_n_ref = 0; /* largest group size seen for this array */
  int write = 0;     /* set when any group contains a write access */

  /* Lazily allocate the per-program maps/lists. */
  if (data->ref2func == NULL)
    data->ref2func = isl_id_to_id_alloc(data->ctx, 0);
  if (data->func_ids == NULL)
    data->func_ids = isl_id_list_alloc(data->ctx, 0);

  for (int i = 0; i < array->n_group; i++) {
    struct t2s_array_ref_group *group = array->groups[i];
    if (group->write == 1)
      write = 1;
    for (int r = 0; r < group->n_ref; r++) {
      struct gpu_stmt_access *ref = group->refs[r];
      /* NOTE(review): fixed 100-byte buffer; assumes array names are
       * short enough — confirm no overflow for long identifiers. */
      char func_name[100];
      /* Fetch the array name */
      isl_map *access = isl_map_copy(ref->access);
      isl_id *ref_id = isl_id_copy(ref->ref_id);
      isl_space *space = isl_map_get_space(access);
      const char *array_name = isl_space_get_tuple_name(space, isl_dim_out);
      isl_space_free(space);
      isl_map_free(access);
      if (r == 0) {
        sprintf(func_name, "%s", array_name);
      } else {
        sprintf(func_name, "%s_%d", array_name, r);
      }
      isl_id *func_id = isl_id_alloc(data->ctx, func_name, NULL);
      data->ref2func = isl_id_to_id_set(data->ref2func, ref_id, func_id);
    }
    if (group->n_ref > max_n_ref)
      max_n_ref = group->n_ref;
  }

  // // debug
  // printf("%s\n", array->array->type);
  // // debug

  /* Insert the function declarations. */
  for (int i = 0; i < max_n_ref; i++) {
    char func_name[100];
    if (i == 0)
      sprintf(func_name, "%s", array->array->name);
    else
      sprintf(func_name, "%s_%d", array->array->name, i);
    /* The gpu_array_info is attached as the id user so that the printer
     * can later recover the element type. */
    isl_id *func_id = isl_id_alloc(data->ctx, func_name, array->array);
    data->func_ids = isl_id_list_add(data->func_ids, func_id);
    if (write == 1) {
      /* Add the drain func.
*/
      char func_name[100]; /* intentionally shadows the outer buffer */
      if (i == 0)
        sprintf(func_name, "%s_drain", array->array->name);
      else
        sprintf(func_name, "%s_%d_drain", array->array->name, i);
      isl_id *func_id = isl_id_alloc(data->ctx, func_name, array->array);
      data->func_ids = isl_id_list_add(data->func_ids, func_id);
    }
  }
}

/* Build the reference groups of "array" and assign T2S function names:
 * populate, merge overlapping accesses, record the grouping, then name. */
static isl_stat gen_t2s_func_ids(struct t2s_data *data, struct t2s_array_info *array)
{
  /* Populate the array groups. */
  isl_ctx *ctx = data->ctx;
  struct t2s_array_ref_group **groups;
  groups = isl_calloc_array(ctx, struct t2s_array_ref_group *, array->array->n_ref);
  int n = t2s_populate_array_references(array, groups, data);

  /* Group overlapping writes. */
  n = t2s_group_overlapping_writes(n, groups, data);

  /* Set the group information. */
  t2s_set_array_groups(array, n, groups);

  /* Assign function names. */
  t2s_assign_array_group_func_id(data, array);

  return isl_stat_ok;
}

/* Generate function declarations.
 * Assign a function name to each access reference.
 * First group references that access the same array together.
 * Connect all accesses in the same group together.
 * Inside each group, if the access is a write access (assocaited with
 * RAW), check if there is any other write access scheduled in-between this
 * write access and the read access that uses this data by RAW.
 * If so, break the edge between these two write accesses.
 * At last, compute the CCs of the graph, and assign a unique function name
 * to each CC of the array group.
 *
 * Inside each group, if the access is a read access (associated with
 * RAR), check if there is any other read access scheduled in-between this
 * read access and the read access that uses this data by RAR.
 * If so, break the edge between these two read accesses.
 * At last, compute the CCs of the graph, and assign a unique function name
 * to each CC of the array group.
 *
 * Inside each group, if the access is a write access (associated with
 * WAW), check if the write-out domain is empty.
If not, generate a unique function
 * name to this write access as the drain function.
 */
static isl_stat gen_t2s_funcs(__isl_keep isl_schedule *schedule, struct t2s_data *data)
{
  isl_ctx *ctx = data->ctx;
  isl_printer *p = data->p;
  struct t2s_group_data *group_data;
  struct gpu_prog *prog;
  isl_schedule_node *node;

  p = isl_printer_start_line(p);
  p = isl_printer_print_str(p, "// Function declarations");
  p = isl_printer_end_line(p);

  /* Initialization. */
  prog = gpu_prog_alloc(ctx, data->scop);
  group_data = isl_calloc_type(ctx, struct t2s_group_data);
  data->array = isl_calloc_array(ctx, struct t2s_array_info, prog->n_array);
  data->n_array = prog->n_array;
  for (int i = 0; i < prog->n_array; i++) {
    data->array[i].array = &prog->array[i];
  }
  /* Flat schedule of the whole tree, used for live-range computation. */
  node = isl_schedule_get_root(schedule);
  group_data->full_sched = isl_schedule_node_get_subtree_schedule_union_map(node);
  isl_schedule_node_free(node);
  data->group_data = group_data;

  /* Group references and assign function ids per array. */
  for (int i = 0; i < data->n_array; i++) {
    gen_t2s_func_ids(data, &data->array[i]);
  }

  // // debug
  // isl_printer *p_debug = isl_printer_to_file(data->ctx, stdout);
  // p_debug = isl_printer_print_id_to_id(p_debug, data->ref2func);
  // printf("\n");
  // isl_printer_free(p_debug);
  // // debug

  data->group_data = t2s_group_data_free(group_data);

  /* Print the function decls.
*/
  /* One "#define FUNC_Si ..." signature macro per function id; the
   * gpu_array_info stashed as the id user supplies the element type. */
  for (int i = 0; i < isl_id_list_n_id(data->func_ids); i++) {
    isl_id *func_id = isl_id_list_get_id(data->func_ids, i);
    struct gpu_array_info *array = isl_id_get_user(func_id);
    p = isl_printer_start_line(p);
    p = isl_printer_print_str(p, "#define FUNC_S");
    p = isl_printer_print_int(p, i);
    p = isl_printer_print_str(p, " type_of<");
    p = isl_printer_print_str(p, array->type);
    p = isl_printer_print_str(p, ">(), {");
    for (int j = 0; j < data->iter_num; j++) {
      if (j > 0)
        p = isl_printer_print_str(p, ", ");
      p = isl_printer_print_str(p, "c");
      p = isl_printer_print_int(p, j);
    }
    p = isl_printer_print_str(p, "}, Place::Host");
    p = isl_printer_end_line(p);
    isl_id_free(func_id);
  }

  /* Declare all Funcs in one statement: "Func a(FUNC_S0), b(FUNC_S1), ...;" */
  p = isl_printer_start_line(p);
  p = isl_printer_print_str(p, "Func ");
  for (int i = 0; i < isl_id_list_n_id(data->func_ids); i++) {
    isl_id *func_id = isl_id_list_get_id(data->func_ids, i);
    if (i > 0) {
      p = isl_printer_print_str(p, ", ");
    }
    p = isl_printer_print_str(p, isl_id_get_name(func_id));
    p = isl_printer_print_str(p, "(FUNC_S");
    p = isl_printer_print_int(p, i);
    p = isl_printer_print_str(p, ")");
    isl_id_free(func_id);
  }
  p = isl_printer_print_str(p, ";");
  p = isl_printer_end_line(p);
  p = isl_printer_start_line(p);
  p = isl_printer_end_line(p);

  gpu_prog_free(prog);

  return isl_stat_ok;
}

/* Generate T2S space-time transformation. */
static isl_stat gen_t2s_space_time(struct t2s_data *data)
{
  /* NOTE(review): d_URE is only set if a URE with d == 1 and
   * update_level == -1 exists; the assert below relies on that. */
  struct t2s_URE *d_URE;
  isl_printer *p = data->p;

  p = isl_printer_start_line(p);
  p = isl_printer_print_str(p, "// Space-time transformation");
  p = isl_printer_end_line(p);

  /* Define time and space loop variables.
*/
  /* "Var tloop0, ..., sloop0, ...;" */
  p = isl_printer_start_line(p);
  p = isl_printer_print_str(p, "Var ");
  int is_var_first = 1;
  for (int i = 0; i < data->prog->time_w; i++) {
    if (!is_var_first) {
      p = isl_printer_print_str(p, ", ");
    }
    p = isl_printer_print_str(p, "tloop");
    p = isl_printer_print_int(p, i);
    if (is_var_first) {
      is_var_first = 0;
    }
  }
  for (int i = 0; i < data->prog->space_w; i++) {
    if (!is_var_first) {
      p = isl_printer_print_str(p, ", ");
    }
    p = isl_printer_print_str(p, "sloop");
    p = isl_printer_print_int(p, i);
    if (is_var_first) {
      is_var_first = 0;
    }
  }
  p = isl_printer_print_str(p, ";");
  p = isl_printer_end_line(p);

  /* Find the drain URE (d == 1) that is not an update (update_level == -1);
   * it anchors the merge_defs/space_time_transform chain below. */
  for (int i = 0; i < data->URE_num; i++) {
    if (data->URE[i]->d == 1) {
      d_URE = data->URE[i];
      if (d_URE->update_level == -1)
        break;
    }
  }
  assert(d_URE->update_level == -1);

  /* "<drain>.merge_defs({updates...}, {non-update non-drain UREs...})" */
  p = isl_printer_start_line(p);
  p = isl_printer_print_str(p, d_URE->name);
  p = isl_printer_print_str(p, ".");
  p = isl_printer_print_str(p, "merge_defs(");
  p = isl_printer_print_str(p, "{");
  int is_first = 1;
  for (int i = 0; i < data->URE_num; i++) {
    struct t2s_URE *URE = data->URE[i];
    if (URE->update_level >= 0) {
      if (!is_first) {
        p = isl_printer_print_str(p, ", ");
      }
      p = isl_printer_print_str(p, URE->name);
      if (is_first)
        is_first = 0;
    }
  }
  p = isl_printer_print_str(p, "}, {");
  is_first = 1;
  for (int i = 0; i < data->URE_num; i++) {
    struct t2s_URE *URE = data->URE[i];
    if (URE->update_level == -1 && URE->d == 0) {
      if (!is_first) {
        p = isl_printer_print_str(p, ", ");
      }
      p = isl_printer_print_str(p, URE->name);
      if (is_first)
        is_first = 0;
    }
  }
  p = isl_printer_print_str(p, "}");
  p = isl_printer_print_str(p, ")");
  p = isl_printer_end_line(p);
  /* Align the chained method calls under the drain URE name. */
  p = isl_printer_indent(p, strlen(d_URE->name));

  /* ".reorder_inward(c0, c1, ...)" */
  p = isl_printer_start_line(p);
  p = isl_printer_print_str(p, ".reorder_inward(");
  is_first = 1;
  for (int i = 0; i < data->iter_num; i++) {
    if (!is_first)
      p = isl_printer_print_str(p, ", ");
    char iter_name[100];
    sprintf(iter_name, "c%d", i);
    p = isl_printer_print_str(p, iter_name);
    if (is_first)
      is_first = 0;
  }
  p = isl_printer_print_str(p, ")");
  p = isl_printer_end_line(p);

  /* ".space_time_transform({c...}, {loops...}, matrix, reverse-matrix)" */
  p = isl_printer_start_line(p);
  p = isl_printer_print_str(p, ".space_time_transform("); // TODO
  int indent = strlen(".space_time_transform(");
  p = isl_printer_indent(p, indent);
  p = isl_printer_print_str(p, "{");
  for (int i = 0; i < data->iter_num; i++) {
    if (i > 0)
      p = isl_printer_print_str(p, ", ");
    char iter_name[100];
    sprintf(iter_name, "c%d", i);
    p = isl_printer_print_str(p, iter_name);
  }
  p = isl_printer_print_str(p, "},");
  p = isl_printer_end_line(p);

  /* Space and time loops. */
  isl_printer *p_str = isl_printer_to_str(data->ctx);
  p_str = isl_printer_print_str(p_str, "{");
  for (int i = 0; i < data->prog->time_w; i++) {
    if (i > 0)
      p_str = isl_printer_print_str(p_str, ", ");
    p_str = isl_printer_print_str(p_str, "tloop");
    p_str = isl_printer_print_int(p_str, i);
  }
  p_str = isl_printer_print_str(p_str, "}");
  char *tloop_list = isl_printer_get_str(p_str);
  isl_printer_free(p_str);

  p_str = isl_printer_to_str(data->ctx);
  p_str = isl_printer_print_str(p_str, "{");
  for (int i = 0; i < data->prog->space_w; i++) {
    if (i > 0)
      p_str = isl_printer_print_str(p_str, ", ");
    p_str = isl_printer_print_str(p_str, "sloop");
    p_str = isl_printer_print_int(p_str, i);
  }
  p_str = isl_printer_print_str(p_str, "}");
  char *sloop_list = isl_printer_get_str(p_str);
  isl_printer_free(p_str);

  /* Async arrays list space loops first; sync arrays list time loops first. */
  if (data->prog->type == POLYSA_SA_TYPE_ASYNC) {
    p = isl_printer_start_line(p);
    p = isl_printer_print_str(p, sloop_list);
    p = isl_printer_print_str(p, ",");
    p = isl_printer_end_line(p);
    p = isl_printer_start_line(p);
    p = isl_printer_print_str(p, tloop_list);
    p = isl_printer_print_str(p, ",");
    p = isl_printer_end_line(p);
  } else if (data->prog->type == POLYSA_SA_TYPE_SYNC) {
    p = isl_printer_start_line(p);
    p = isl_printer_print_str(p, tloop_list);
    p = isl_printer_print_str(p, ",");
    p = isl_printer_end_line(p);
    p = isl_printer_start_line(p);
    p = isl_printer_print_str(p, sloop_list);
    p = isl_printer_print_str(p, ",");
    p = isl_printer_end_line(p);
  }
  free(tloop_list);
  free(sloop_list);

  /* Transform matrix. */
  /* Identity matrix of size iter_num x iter_num. */
  for (int i = 0; i < data->iter_num; i++) {
    p = isl_printer_start_line(p);
    if (i == 0) {
      p = isl_printer_print_str(p, "{");
      p = isl_printer_indent(p, 1);
    }
    for (int j = 0; j < data->iter_num; j++) {
      if (j > 0)
        p = isl_printer_print_str(p, ", ");
      if (i == j)
        p = isl_printer_print_int(p, 1);
      else
        p = isl_printer_print_int(p, 0);
    }
    if (i == data->iter_num - 1) {
      p = isl_printer_print_str(p, "}");
      p = isl_printer_indent(p, -1);
    }
    p = isl_printer_print_str(p, ",");
    p = isl_printer_end_line(p);
  }

  /* Reverse transform matrix. */
  /* Also identity; the last row closes the space_time_transform call. */
  for (int i = 0; i < data->iter_num; i++) {
    p = isl_printer_start_line(p);
    if (i == 0) {
      p = isl_printer_print_str(p, "{");
      p = isl_printer_indent(p, 1);
    }
    for (int j = 0; j < data->iter_num; j++) {
      if (j > 0)
        p = isl_printer_print_str(p, ", ");
      if (i == j)
        p = isl_printer_print_int(p, 1);
      else
        p = isl_printer_print_int(p, 0);
    }
    if (i == data->iter_num - 1) {
      p = isl_printer_print_str(p, "}");
      p = isl_printer_indent(p, -1);
    }
    if (i == data->iter_num - 1)
      p = isl_printer_print_str(p, ")");
    else
      p = isl_printer_print_str(p, ",");
    p = isl_printer_end_line(p);
  }
  p = isl_printer_indent(p, -indent);

  /* ".domain(iter, lb, ub, stride, ...)" for the original iterators. */
  p = isl_printer_start_line(p);
  p = isl_printer_print_str(p, ".domain(");
  int indent_tmp = strlen(".domain(");
  p = isl_printer_indent(p, indent_tmp);
  for (int i = 0; i < data->iter_num; i++) {
    if (i > 0)
      p = isl_printer_start_line(p);
    struct polysa_iter *iter = data->iter[i];
    p = isl_printer_print_str(p, iter->name);
    p = isl_printer_print_str(p, ", ");
    p = isl_printer_set_output_format(p, ISL_FORMAT_C);
    p = isl_printer_print_aff(p, iter->lb);
    p = isl_printer_print_str(p, ", ");
    p = isl_printer_print_aff(p, iter->ub);
    p = isl_printer_print_str(p, ", ");
    p = isl_printer_print_int(p, iter->stride);
    p = isl_printer_print_str(p, ",");
    p = isl_printer_end_line(p);
  }
  /* Add time and space loops.
*/ for (int i = 0; i < data->iter_num; i++) { p = isl_printer_start_line(p); struct polysa_iter *iter = data->iter[i]; p = isl_printer_print_str(p, iter->ts_name); p = isl_printer_print_str(p, ", "); p = isl_printer_set_output_format(p, ISL_FORMAT_C); p = isl_printer_print_aff(p, iter->lb); p = isl_printer_print_str(p, ", "); p = isl_printer_print_aff(p, iter->ub); p = isl_printer_print_str(p, ", "); p = isl_printer_print_int(p, iter->stride); if (i == data->iter_num - 1) p = isl_printer_print_str(p, ");"); else p = isl_printer_print_str(p, ","); p = isl_printer_end_line(p); } p = isl_printer_indent(p, -indent_tmp); p = isl_printer_set_output_format(p, ISL_FORMAT_ISL); p = isl_printer_indent(p, -strlen(d_URE->name)); p = isl_printer_start_line(p); p = isl_printer_end_line(p); return isl_stat_ok; } static __isl_give struct t2s_data *t2s_data_init(__isl_take struct t2s_data *d) { d->anchor_domain = NULL; d->stmt_domain = NULL; d->stmt_sim_domain = NULL; d->URE = NULL; d->URE_num = 0; d->t2s_stmt_num = 0; d->t2s_stmt_text = NULL; d->iter_num = 0; d->iter = NULL; d->scop = NULL; d->p = NULL; d->ctx = NULL; d->deps = NULL; d->ndeps = 0; d->ref2func = NULL; // d->ref2dfunc = NULL; d->func_ids = NULL; d->stmt_data = NULL; d->n_array = 0; d->array = NULL; d->group_data = NULL; d->prog = NULL; d->schedule = NULL; } static isl_stat extract_anchor_domain(__isl_keep isl_schedule *schedule, struct t2s_data *data) { isl_schedule_node *root = isl_schedule_get_root(schedule); isl_union_set *domain = isl_schedule_get_domain(schedule); isl_union_map *sched = isl_schedule_node_get_subtree_schedule_union_map(root); isl_schedule_node_free(root); isl_union_set *anchor_domain = isl_union_set_apply(domain, sched); // // debug // isl_printer *p = isl_printer_to_file(isl_union_set_get_ctx(anchor_domain), stdout); // p = isl_printer_print_union_set(p, anchor_domain); // printf("\n"); // // debug data->anchor_domain = isl_set_from_union_set(anchor_domain); return isl_stat_ok; } /* Generate 
T2S code from schedule. */
/* Drive the whole T2S emission for one systolic-array program:
 * open "t2s.cpp", print headers, extract deps/iterators/domains,
 * emit inputs, vars, functions, statements and (unless running the
 * second tiling phase) the space-time transformation, then close the
 * file.  "prog->schedule" is replaced with the possibly-updated
 * schedule returned by the extraction helpers.
 */
static isl_stat print_t2s_with_schedule(
  __isl_keep struct polysa_prog *prog,
  __isl_keep struct ppcg_scop *scop)
{
  struct t2s_data *data;
  isl_union_set *stmt_domains;      /* NOTE(review): unused locals below */
  isl_union_map *stmt_schedules;
  isl_schedule_node *node;
  isl_union_set *stmt_trans_domains;
  isl_space *stmt_space;
  isl_ctx *ctx;
  isl_schedule *schedule;
  int t2s_tile_second_phase;

  schedule = prog->schedule;
  ctx = isl_schedule_get_ctx(schedule);
  data = isl_calloc_type(ctx, struct t2s_data);
  data = t2s_data_init(data);

  t2s_tile_second_phase = (scop->options->t2s_tile && scop->options->t2s_tile_phase == 1);

  data->ctx = ctx;
  data->scop = scop;
  data->prog = prog;
  data->schedule = schedule;
  /* NOTE(review): fopen result is not checked — a failed open would
   * crash in the printer; confirm acceptable. */
  FILE *t2s_fp = fopen("t2s.cpp", "w");
  data->p = isl_printer_to_file(ctx, t2s_fp);

  /* Print out the headers. */
  gen_t2s_headers(data);

  data->p = isl_printer_start_line(data->p);
  data->p = isl_printer_print_str(data->p, "int main(void) {");
  data->p = isl_printer_end_line(data->p);

  /* Calculate the iterator num. */
  data->iter_num = 0;
  isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth, &data->iter_num);

  /* Update the deps. */
  data->ndeps = 0;
  data->deps = NULL;
  schedule = extract_deps(schedule, data);

  /* Calculate the anchor domain.
   * Allocate a empty set then unionize it with the scheduling domain of
   * each statement. */
  data->anchor_domain = NULL;
  extract_anchor_domain(schedule, data);

  /* Generate the iterator meta data. */
  extract_iters(schedule, data);

  /* Calculate the simplified domain (in scheduling dims) for each statement. */
  data->stmt_domain = NULL;
  data->stmt_sim_domain = NULL;
  extract_stmt_domain(schedule, data);

  /* Generate input declarations. */
  gen_t2s_inputs(data);

  /* Generate variable declarations. */
  gen_t2s_vars(data);

  /* Generate function declarations. */
  gen_t2s_funcs(schedule, data);

  /* Generate the T2S statements .*/
  data->t2s_stmt_num = 0;
  data->t2s_stmt_text = NULL;
  schedule = gen_stmt_text_wrap(schedule, data);

  /* Generate time-space transformation. */
  if (!t2s_tile_second_phase) {
    gen_t2s_space_time(data);
  } else {
    /* Second tiling phase: leave the transformation for the user. */
    data->p = isl_printer_start_line(data->p);
    data->p = isl_printer_print_str(data->p, "// Space-time transformation (Fill in manually)");
    data->p = isl_printer_end_line(data->p);
    data->p = isl_printer_start_line(data->p);
    data->p = isl_printer_end_line(data->p);
  }

  data->p = isl_printer_start_line(data->p);
  data->p = isl_printer_print_str(data->p, "// PE optimization (Fill in manually)");
  data->p = isl_printer_end_line(data->p);
  data->p = isl_printer_start_line(data->p);
  data->p = isl_printer_end_line(data->p);

  data->p = isl_printer_start_line(data->p);
  data->p = isl_printer_print_str(data->p, "// CPU verification (Fill in manually)");
  data->p = isl_printer_end_line(data->p);
  data->p = isl_printer_start_line(data->p);
  data->p = isl_printer_end_line(data->p);

  data->p = isl_printer_start_line(data->p);
  data->p = isl_printer_print_str(data->p, "}");
  data->p = isl_printer_end_line(data->p);

  fclose(t2s_fp);
  t2s_data_free(data);
  prog->schedule = schedule;

  return isl_stat_ok;
}

/* Callback for isl_schedule_node_every_descendant: holds (true) only on
 * non-band nodes, so the traversal reports whether the subtree is
 * band-free. */
static isl_bool no_band_node_as_descendant(__isl_keep isl_schedule_node *node, void *user){
  enum isl_schedule_node_type node_type = isl_schedule_node_get_type(node);
  if (node_type == isl_schedule_node_band) {
    return isl_bool_false;
  } else {
    return isl_bool_true;
  }
}

/* No band node is allowed after the sequence or set node.
*/ static isl_bool t2s_legal_at_node(__isl_keep isl_schedule_node *node, void *user) { enum isl_schedule_node_type node_type = isl_schedule_node_get_type(node); if (node_type == isl_schedule_node_sequence || node_type == isl_schedule_node_set) { int n_node_has_band = 0; for (int n = 0; n < isl_schedule_node_n_children(node); n++) { node = isl_schedule_node_child(node, n); isl_bool no_band = isl_schedule_node_every_descendant(node, &no_band_node_as_descendant, NULL); if (!no_band) n_node_has_band++; } if (n_node_has_band > 2) { return isl_bool_false; } else { return isl_bool_true; } } else { return isl_bool_true; } } /* Check if there is only nested permuted band in the program. */ static isl_bool t2s_legality_check(__isl_keep isl_schedule *schedule) { isl_schedule_node *root = isl_schedule_get_root(schedule); isl_bool is_legal = isl_schedule_node_every_descendant(root, &t2s_legal_at_node, NULL); isl_schedule_node_free(root); return is_legal; } /* Generate CPU code for "scop" and print it to "p". * * First obtain a schedule for "scop" and then print code for "scop" * using that schedule. * * To generate T2S code from the tiled design, there are two phases. * In the first phase, a tiled CPU program is generated w/o T2S program. * In the second phase, the tiled CPU program is taken in and the T2S program * with tiled UREs are generated. */ static __isl_give isl_printer *generate(__isl_take isl_printer *p, struct ppcg_scop *scop, struct ppcg_options *options) { isl_schedule *schedule; int t2s_tile_second_phase = (options->t2s_tile && options->t2s_tile_phase == 1); int t2s_tile_first_phase = (options->t2s_tile && options->t2s_tile_phase == 0); if (t2s_tile_second_phase) { /* In the second phase, the reschedule is disabled so that the * original schedule from the program is used. 
*/
    options->reschedule = 0;
  }

  schedule = get_schedule(scop, options);

  // // debug
  // isl_printer *p_debug = isl_printer_to_file(isl_schedule_get_ctx(schedule), stdout);
  // p_debug = isl_printer_set_yaml_style(p_debug, ISL_YAML_STYLE_BLOCK);
  // p_debug = isl_printer_print_schedule(p_debug, schedule);
  // printf("\n");
  // p_debug = isl_printer_print_union_map(p_debug, scop->tagged_dep_flow);
  // printf("\n");
  // p_debug = isl_printer_print_union_map(p_debug, scop->tagged_dep_waw);
  // printf("\n");
  // p_debug = isl_printer_print_union_map(p_debug, scop->tagged_dep_rar);
  // printf("\n");
  // isl_printer_free(p_debug);
  // // debug

  if (!t2s_tile_second_phase) {
    /* Check if the program is legal to be mapped to systolic array. */
    isl_bool is_legal = sa_legality_check(schedule, scop);
    if (is_legal != isl_bool_true) {
      printf("[PolySA] Illegal to be transformed to systolic array.\n");
    }
    if (is_legal) {
      /* Generate systolic arrays using space-time mapping. */
      isl_size num_sa = 0;
      struct polysa_prog **sa_candidates = sa_space_time_transform(schedule, scop, &num_sa);
      if (num_sa > 0) {
        printf("[PolySA] %d systolic arrays generated.\n", num_sa);
      }
      // TODO: All the SA candidates keep the same schedule tree. We need to duplicate them to
      // seperate the transformation performed on each array.
      /* Pick up one systolic array to preceed based on heuristics. */
      /* NOTE(review): no num_sa == 0 guard before picking — confirm
       * sa_candidates_smart_pick tolerates an empty candidate list. */
      struct polysa_prog *sa_opt = sa_candidates_smart_pick(sa_candidates, num_sa);
      if (t2s_tile_first_phase) {
        /* Apply PE optimization. */
        sa_pe_optimize(sa_opt);
      }
      // // debug
      // // isl_printer *p = isl_printer_to_file(isl_schedule_get_ctx(sa_opt->schedule), stdout);
      // p_debug = isl_printer_print_schedule(p_debug, sa_opt->schedule);
      // printf("\n");
      // // debug
      if (!t2s_tile_first_phase) {
        /* Generate T2S program.
*/
        isl_bool is_t2s_legal = t2s_legality_check(sa_opt->schedule);
        if (is_t2s_legal) {
          print_t2s_with_schedule(sa_opt, scop);
        } else {
          printf("[PolySA] Illegal to be transformed to T2S program.\n");
        }
      }
      /* Continue CPU code generation with the chosen array's schedule. */
      schedule = isl_schedule_copy(sa_opt->schedule);
      polysa_prog_free(sa_opt);
    }
  } else {
    /* Second tiling phase: wrap the existing schedule directly. */
    struct polysa_prog *sa = polysa_prog_from_schedule(schedule);
    sa->scop = scop;
    // TODO: sa->type
    // TODO: sa->array_dim
    // TODO: sa->array_part_w
    // TODO: sa->space_w
    // TODO: sa->time_w
    print_t2s_with_schedule(sa, scop);
    schedule = isl_schedule_copy(sa->schedule);
    polysa_prog_free(sa);
  }

  /* Generate the transformed CPU program. */
  return print_cpu_with_schedule(p, scop, schedule, options);
}

/* Wrapper around generate for use as a ppcg_transform callback. */
static __isl_give isl_printer *print_polysa_t2s_wrap(__isl_take isl_printer *p,
  struct ppcg_scop *scop, void *user)
{
  struct ppcg_options *options = user;

  return generate(p, scop, options);
}

/* Transform the code in the file called "input" by replacing
 * all scops by corresponding CPU code and write the results to a file
 * called "output".
 */
int generate_polysa_t2s(isl_ctx *ctx, struct ppcg_options *options,
  const char *input, const char *output)
{
  FILE *output_file;
  int r;

  /* Return the handle of the output file. */
  output_file = get_output_file(input, output);
  if (!output_file)
    return -1;

  /* Extract each scop from the program and call the callback
   * function to process it.
   */
  r = ppcg_transform(ctx, input, output_file, options, &print_polysa_t2s_wrap, options);

  fclose(output_file);

  return r;
}
GB_unop__cos_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__cos_fp64_fp64
// op(A') function: GB_unop_tran__cos_fp64_fp64

// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = cos (aij)

#define GB_ATYPE \
 double

#define GB_CTYPE \
 double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
 double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
 z = cos (x) ;

// casting
#define GB_CAST(z, aij) \
 double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
 /* aij = Ax [pA] */ \
 double aij = Ax [pA] ; \
 /* Cx [pC] = op (cast (aij)) */ \
 double z = aij ; \
 Cx [pC] = cos (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
 0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
 (GxB_NO_COS || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__cos_fp64_fp64
(
 double *Cx, // Cx and Ax may be aliased
 const double *Ax,
 const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
 int64_t anz,
 int nthreads
)
{
 #if GB_DISABLE
 return (GrB_NO_VALUE) ;
 #else
 int64_t p ;
 if (Ab == NULL)
 {
 // dense case: every entry of A is present
 #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
 GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
 #else
 #pragma omp parallel for num_threads(nthreads) schedule(static)
 for (p = 0 ; p < anz ; p++)
 {
 double aij = Ax [p] ;
 double z = aij ;
 Cx [p] = cos (z) ;
 }
 #endif
 }
 else
 {
 // bitmap case, no transpose; A->b already memcpy'd into C->b
 #pragma omp parallel for num_threads(nthreads) schedule(static)
 for (p = 0 ; p < anz ; p++)
 {
 if (!Ab [p]) continue ;
 double aij = Ax [p] ;
 double z = aij ;
 Cx [p] = cos (z) ;
 }
 }
 return (GrB_SUCCESS) ;
 #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__cos_fp64_fp64
(
 GrB_Matrix C,
 const GrB_Matrix A,
 int64_t *GB_RESTRICT *Workspaces,
 const int64_t *GB_RESTRICT A_slice,
 int nworkspaces,
 int nthreads
)
{
 #if GB_DISABLE
 return (GrB_NO_VALUE) ;
 #else
 // the shared transpose template supplies the loop body via the macros above
 #include "GB_unop_transpose.c"
 return (GrB_SUCCESS) ;
 #endif
}

#endif
adjointnsincompressible.h
//*****************************************************************************
//  Title   :   src/equation/adjointnsincompressible.h
//  Author  :   Tanabe Yuta
//  Date    :   2021/10/07
//  Copyright   :   (C)2021 TanabeYuta
//*****************************************************************************

#pragma once

namespace PANSLBM2 {
    // Adjoint Navier-Stokes (incompressible) lattice-Boltzmann kernels.
    // P<T> is a lattice descriptor (e.g. D2Q9) providing nc, ei[], cx[],
    // cy[], IndexF(), declared elsewhere in the project.
    namespace ANSin {
        // Function of updating macroscopic values of ANS for 2D.
        // Accumulates adjoint pressure (_ip), adjoint velocity (_iux, _iuy)
        // and adjoint momentum (_imx, _imy) from the distribution functions
        // _f0 (rest direction) and _f (moving directions) at cell _idx.
        // NOTE(review): _rho is accepted but unused in this overload —
        // presumably kept for signature parity with the forward solver.
        template<class T, template<class>class P>
        void Macro(T &_ip, T &_iux, T &_iuy, T &_imx, T &_imy, T _rho, T _ux, T _uy, const T *_f0, const T *_f, int _idx) {
            _ip = _f0[_idx]*P<T>::ei[0];
            _iux = -_f0[_idx]*P<T>::ei[0]*_ux;
            _iuy = -_f0[_idx]*P<T>::ei[0]*_uy;
            _imx = T();
            _imy = T();
            for (int c = 1; c < P<T>::nc; ++c) {
                T ciu = P<T>::cx[c]*_ux + P<T>::cy[c]*_uy;
                T fei = _f[P<T>::IndexF(_idx, c)]*P<T>::ei[c];
                _ip += fei;
                _iux += fei*(P<T>::cx[c] + 3.0*ciu*P<T>::cx[c] - _ux);
                _iuy += fei*(P<T>::cy[c] + 3.0*ciu*P<T>::cy[c] - _uy);
                _imx += fei*P<T>::cx[c];
                _imy += fei*P<T>::cy[c];
            }
        }

        // Function of getting equilibrium of ANS for 2D.
        // Adjoint equilibrium is linear in the adjoint macroscopic fields.
        template<class T, template<class>class P>
        void Equilibrium(T *_feq, T _ip, T _iux, T _iuy) {
            for (int c = 0; c < P<T>::nc; ++c) {
                _feq[c] = _ip + 3.0*(_iux*P<T>::cx[c] + _iuy*P<T>::cy[c]);
            }
        }

        // Function of applying external force with Brinkman model of ANS for 2D.
        // Subtracts the Brinkman friction contribution (porosity parameter
        // _alpha) from the moving-direction distributions at cell _idx;
        // the rest direction (c = 0) has zero velocity and is untouched.
        template<class T, template<class>class P>
        void ExternalForceBrinkman(T _imx, T _imy, T *_f, T _alpha, int _idx) {
            T coef = 3.0*_alpha/(1.0 + _alpha);
            for (int c = 1; c < P<T>::nc; ++c) {
                _f[P<T>::IndexF(_idx, c)] -= coef*(P<T>::cx[c]*_imx + P<T>::cy[c]*_imy);
            }
        }

        // Function of Update macro, External force(Brinkman model) and Collide of ANS for 2D.
        // For each cell: compute adjoint macros, apply the Brinkman force to
        // the distributions, recompute macros from the forced distributions,
        // optionally save them, then apply BGK relaxation with
        // omega = 1/(3 nu + 1/2).
        template<class T, template<class>class P>
        void MacroBrinkmanCollide(
            P<T>& _p, const T *_rho, const T *_ux, const T *_uy, T *_ip, T *_iux, T *_iuy, T *_imx, T *_imy, T _viscosity,
            const T *_alpha, bool _issave = false
        ) {
            T omega = 1.0/(3.0*_viscosity + 0.5), iomega = 1.0 - omega, feq[P<T>::nc];
#pragma omp parallel for private(feq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                // Update macro
                T ip, iux, iuy, imx, imy;
                Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);

                // External force with Brinkman model
                ExternalForceBrinkman<T, P>(imx, imy, _p.f, _alpha[idx], idx);
                // NOTE(review): macros are deliberately recomputed here so that
                // the saved/collided values reflect the forced distributions —
                // confirm against the forward-solver counterpart.
                Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);

                // Save macro if need
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                }

                // Collide (BGK relaxation toward the adjoint equilibrium)
                Equilibrium<T, P>(feq, ip, iux, iuy);
                _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
                }
            }
        }

        // Function of setting initial condition of ANS for 2D.
        // Seeds every cell's distributions with the equilibrium computed from
        // the supplied adjoint fields.  _ux/_uy are accepted but unused here.
        template<class T, template<class>class P>
        void InitialCondition(P<T>& _p, const T *_ux, const T *_uy, const T *_ip, const T *_iux, const T *_iuy) {
            T feq[P<T>::nc];
#pragma omp parallel for private(feq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                Equilibrium<T, P>(feq, _ip[idx], _iux[idx], _iuy[idx]);
                _p.f0[idx] = feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    _p.f[P<T>::IndexF(idx, c)] = feq[c];
                }
            }
        }

        // Function of setting boundary condition of ANS set iU for D2Q9.
        // Bounce-back-style adjoint condition with offset 2*_eps/3 on the
        // incoming directions of each domain edge.  Each edge is handled only
        // by the MPI rank that owns it (PEx/PEy checks); _bctype selects the
        // boundary nodes by global coordinates.
        // NOTE(review): direction indices assume the project's D2Q9 ordering —
        // verify against the lattice descriptor.
        template<class T, template<class>class P, class Ff>
        void iBoundaryConditionSetU(P<T>& _p, Ff _bctype, T _eps = T()) {
            // On xmin
            if (_p.PEx == 0) {
                for (int j = 0; j < _p.ny; ++j) {
                    if (_bctype(0 + _p.offsetx, j + _p.offsety)) {
                        int idx = _p.Index(0, j);
                        _p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] - 2.0*_eps/3.0;
                        _p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - 2.0*_eps/3.0;
                        _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - 2.0*_eps/3.0;
                    }
                }
            }
            // On xmax
            if (_p.PEx == _p.mx - 1) {
                for (int j = 0; j < _p.ny; ++j) {
                    if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) {
                        int idx = _p.Index(_p.nx - 1, j);
                        _p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] - 2.0*_eps/3.0;
                        _p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] - 2.0*_eps/3.0;
                        _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - 2.0*_eps/3.0;
                    }
                }
            }
            // On ymin
            if (_p.PEy == 0) {
                for (int i = 0; i < _p.nx; ++i) {
                    if (_bctype(i + _p.offsetx, 0 + _p.offsety)) {
                        int idx = _p.Index(i, 0);
                        _p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] - 2.0*_eps/3.0;
                        _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - 2.0*_eps/3.0;
                        _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - 2.0*_eps/3.0;
                    }
                }
            }
            // On ymax
            if (_p.PEy == _p.my - 1) {
                for (int i = 0; i < _p.nx; ++i) {
                    if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) {
                        int idx = _p.Index(i, _p.ny - 1);
                        _p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] - 2.0*_eps/3.0;
                        _p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] - 2.0*_eps/3.0;
                        _p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - 2.0*_eps/3.0;
                    }
                }
            }
        }

        // Function of setting boundary condition of ANS set iRho for D2Q9.
        // Like iBoundaryConditionSetU, but the offset rho0 is derived from the
        // outgoing distributions on each edge instead of a fixed _eps.
        template<class T, template<class>class P, class Ff>
        void iBoundaryConditionSetRho2D(P<T>& _p, Ff _bctype) {
            // On xmin
            if (_p.PEx == 0) {
                for (int j = 0; j < _p.ny; ++j) {
                    if (_bctype(0 + _p.offsetx, j + _p.offsety)) {
                        int idx = _p.Index(0, j);
                        T rho0 = (4.0*_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 8)])/3.0;
                        _p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] - rho0;
                        _p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - rho0;
                        _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - rho0;
                    }
                }
            }
            // On xmax
            if (_p.PEx == _p.mx - 1) {
                for (int j = 0; j < _p.ny; ++j) {
                    if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) {
                        int idx = _p.Index(_p.nx - 1, j);
                        T rho0 = (4.0*_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 7)])/3.0;
                        _p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] - rho0;
                        _p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] - rho0;
                        _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - rho0;
                    }
                }
            }
            // On ymin
            if (_p.PEy == 0) {
                for (int i = 0; i < _p.nx; ++i) {
                    if (_bctype(i + _p.offsetx, 0 + _p.offsety)) {
                        int idx = _p.Index(i, 0);
                        T rho0 = (4.0*_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 6)])/3.0;
                        _p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] - rho0;
                        _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] - rho0;
                        _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] - rho0;
                    }
                }
            }
            // On ymax
            if (_p.PEy == _p.my - 1) {
                for (int i = 0; i < _p.nx; ++i) {
                    if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) {
                        int idx = _p.Index(i, _p.ny - 1);
                        T rho0 = (4.0*_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)])/3.0;
                        _p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] - rho0;
                        _p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] - rho0;
                        _p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] - rho0;
                    }
                }
            }
        }
    }
}
DRB068-restrictpointer2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
The restrict type qualifier is an indication to the compiler that,
if the memory addressed by the restrict-qualified pointer is modified,
no other pointer will access that same memory.
If a particular chunk of memory is not modified,
it can be aliased through more than one restricted pointer.

A C99 restrict feature. For gcc, you must use -std=c99 to compile this program.
*/
#include <stdlib.h>
#include <stdio.h>

/* Fill a[], b[], c[] with 1, i, i*i respectively; iterations are
   independent, so the parallel loop is race-free. */
void init(int n, int * restrict a, int * restrict b, int * restrict c)
{
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i < n; i++)
  {
    a[i] = 1;
    b[i] = i;
    c[i] = i * i;
  }
}

/* a[i] = b[i] + c[i]; restrict promises the arrays do not alias. */
void foo(int n, int * restrict a, int * restrict b, int * restrict c)
{
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i < n; i++)
    a[i] = b[i] + c[i];
}

/* Print the three arrays, one triple per line. */
void print(int n, int * restrict a, int * restrict b, int * restrict c)
{
  int i;
  for (i = 0; i < n; i++)
  {
    printf("%d %d %d\n", a[i], b[i], c[i]);
  }
}

int main()
{
  int n = 1000;
  int *a, *b, *c;

  a = (int*) malloc(n * sizeof(int));
  if (a == 0)
  {
    fprintf(stderr, "skip the execution due to malloc failures.\n");
    return 1;
  }
  b = (int*) malloc(n * sizeof(int));
  if (b == 0)
  {
    /* release the buffer already allocated before bailing out */
    free(a);
    fprintf(stderr, "skip the execution due to malloc failures.\n");
    return 1;
  }
  c = (int*) malloc(n * sizeof(int));
  if (c == 0)
  {
    /* release the buffers already allocated before bailing out */
    free(a);
    free(b);
    fprintf(stderr, "skip the execution due to malloc failures.\n");
    return 1;
  }

  init(n, a, b, c);
  foo(n, a, b, c);
  print(n, a, b, c);

  free(a);
  free(b);
  free(c);
  return 0;
}
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include 
"MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImage() returns a pointer to an image structure initialized to
%  default values.
%
%  The format of the AcquireImage method is:
%
%      Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.  May be NULL, in which case pure defaults are returned.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) memset(image,0,sizeof(*image));
  /*
    Initialize Image structure: sRGB colorspace, standard chromaticities,
    and freshly acquired cache/blob/channel-map/semaphore resources.
  */
  (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
    exception);
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image->transparent_color,exception);
  GetTimerInfo(&image->timer);
  image->cache=AcquirePixelCache(0);
  image->channel_mask=DefaultChannels;
  image->channel_map=AcquirePixelChannelMap();
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=GetMagickTime();
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AcquireSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
  if (image_info->size != (char *) NULL)
    {
      /* -size: geometry width/height become columns/rows; x is an offset */
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      /* -extract takes effect only when an x/y offset was supplied */
      (void) memset(&geometry,0,sizeof(geometry));
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* density "XxY"; Y defaults to X when sigma is absent */
      flags=ParseGeometry(image_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->resolution.x=geometry_info.rho;
      image->resolution.y=image->resolution.x;
      if ((flags & SigmaValue) != 0)
        image->resolution.y=geometry_info.sigma;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->matte_color=image_info->matte_color;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  /*
    Set all global options that map to per-image settings.
  */
  (void) SyncImageSettings(image_info,image,exception);
  /*
    Global options that are only set for new images.
  */
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* ">" only raises delay downward, "<" only upward; else set directly */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if ((double) image->delay > floor(geometry_info.rho+0.5))
            image->delay=(size_t) CastDoubleToLong(floor(
              geometry_info.rho+0.5));
        }
      else
        if ((flags & LessValue) != 0)
          {
            if ((double) image->delay < floor(geometry_info.rho+0.5))
              image->ticks_per_second=CastDoubleToLong(floor(
                geometry_info.sigma+0.5));
          }
        else
          image->delay=(size_t) CastDoubleToLong(floor(
            geometry_info.rho+0.5));
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=CastDoubleToLong(floor(
          geometry_info.sigma+0.5));
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e I m a g e I n f o                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImageInfo() allocates the ImageInfo structure.
%
%  The format of the AcquireImageInfo method is:
%
%      ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *image_info;

  /* AcquireCriticalMemory aborts on allocation failure, so no NULL check */
  image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
  GetImageInfo(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e N e x t I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireNextImage() initializes the next image in a sequence to
%  default values.  The next member of image points to the newly allocated
%  image.  If there is a memory shortage, next is assigned NULL.
%
%  The format of the AcquireNextImage method is:
%
%      void AcquireNextImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  /*
    Allocate the next image and link it into the list after `image`.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MagickPathExtent);
  /* the next image shares this image's blob, not its own */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A p p e n d I m a g e s                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AppendImages() takes all images from the current image pointer to the end
%  of the image list and appends them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting affects how the image is justified in the
%  final image.
%
%  The format of the AppendImages method is:
%
%      Image *AppendImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area: sum one axis, take the max of the
    other, and promote depth/alpha if any image in the list requires it.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* vertical stacking: widths max, heights accumulate */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /* horizontal append: widths accumulate, heights max */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
    {
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  /* mixed colorspaces are normalized to sRGB in the result */
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace,exception);
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(status) \
      magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      const Quantum
        *magick_restrict p;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      /* a failed row flags status; remaining iterations become no-ops */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        /* copy via PixelInfo so channel layouts may differ */
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C a t c h I m a g e E x c e p t i o n                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns if no exceptions are found in the image
%  sequence, otherwise it determines the most severe exception and reports
%  it as a warning or error depending on the severity.
%
%  The format of the CatchImageException method is:
%
%      ExceptionType CatchImageException(Image *image)
%
%  A description of each parameter follows:
%
%    o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception;

  ExceptionType
    severity;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=AcquireExceptionInfo();
  CatchException(exception);
  severity=exception->severity;
  exception=DestroyExceptionInfo(exception);
  return(severity);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l i p I m a g e P a t h                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipImagePath() sets the image clip mask based on any clipping path
%  information if it exists.
%
%  The format of the ClipImagePath method is:
%
%      MagickBooleanType ClipImagePath(Image *image,const char *pathname,
%        const MagickBooleanType inside,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o pathname: name of clipping path resource.  If name is preceded by #, use
%      clipping path numbered by name.
%
%    o inside: if non-zero, later operations take effect inside clipping path.
%      Otherwise later operations take effect outside clipping path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
  /* convenience wrapper: clip to the first stored clipping path */
  return(ClipImagePath(image,"#1",MagickTrue,exception));
}

MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag  "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Look up the named clipping path in the image's 8BIM profile properties.
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property,exception);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the stored path data into a mask image via BlobToImage.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,
    MagickPathExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,
    MagickPathExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask,exception);
      if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
  /* invert the mask when clipping should apply outside the path */
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse,exception);
  (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageMask(image,WritePixelMask,clip_mask,exception);
  image->mask_trait=UpdatePixelTrait;
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e I m a g e                                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImage() copies an image and returns the copy as a new image object.
%
%  If the specified columns and rows is 0, an exact copy of the image is
%  returned, otherwise the pixel data is undefined and must be initialized
%  with the QueueAuthenticPixels() and SyncAuthenticPixels() methods.  On
%  failure, a NULL image is returned and exception describes the reason for the
%  failure.
%
%  The format of the CloneImage method is:
%
%      Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType orphan,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the cloned image.
%
%    o rows: the number of rows in the cloned image.
%
%    o detach:  With a value other than 0, the cloned image is detached from
%      its parent I/O stream.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  Image
    *clone_image;

  double
    scale;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) memset(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->number_channels=image->number_channels;
  clone_image->number_meta_channels=image->number_meta_channels;
  clone_image->metacontent_extent=image->metacontent_extent;
  clone_image->colorspace=image->colorspace;
  clone_image->alpha_trait=image->alpha_trait;
  clone_image->channels=image->channels;
  clone_image->mask_trait=image->mask_trait;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  clone_image->image_info=CloneImageInfo(image->image_info);
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->extent=image->extent;
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  clone_image->channel_mask=image->channel_mask;
  clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MagickPathExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,
    MagickPathExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      /* detached clone: no list links and its own (empty) blob */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AcquireSemaphoreInfo();
  if (image->colormap != (PixelInfo *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelInfo *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /* exact copy: share the pixel cache by reference */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /*
    Resized clone: scale the page and tile offsets proportionally.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) CastDoubleToLong(floor(scale*
    image->page.width+0.5));
  clone_image->page.x=CastDoubleToLong(ceil(scale*image->page.x-0.5));
  clone_image->tile_offset.x=CastDoubleToLong(ceil(scale*
    image->tile_offset.x-0.5));
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) CastDoubleToLong(floor(scale*
    image->page.height+0.5));
  clone_image->page.y=CastDoubleToLong(ceil(scale*image->page.y-0.5));
  clone_image->tile_offset.y=CastDoubleToLong(ceil(scale*
    image->tile_offset.y-0.5));
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
    clone_image=DestroyImage(clone_image);
  return(clone_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e I m a g e I n f o                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageInfo() makes a copy of the given image info structure.  If
%  NULL is specified, a new image info structure is created initialized to
%  default values.
%
%  The format of the CloneImageInfo method is:
%
%      ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *)
NULL) (void) CloneString(&clone_info->server_name,image_info->server_name); if (image_info->font != (char *) NULL) (void) CloneString(&clone_info->font,image_info->font); if (image_info->texture != (char *) NULL) (void) CloneString(&clone_info->texture,image_info->texture); if (image_info->density != (char *) NULL) (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->matte_color=image_info->matte_color; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->custom_stream=image_info->custom_stream; (void) CopyMagickString(clone_info->magick,image_info->magick, MagickPathExtent); (void) CopyMagickString(clone_info->unique,image_info->unique, MagickPathExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MagickPathExtent); clone_info->channel=image_info->channel; (void) CloneImageOptions(clone_info,image_info); clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry to the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,
%        const Image *source_image,const RectangleInfo *geometry,
%        const OffsetInfo *offset,ExceptionInfo *exception);
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /* the destination rectangle must lie entirely inside the image */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    /* once any row fails, remaining iterations are skipped (no break
       allowed inside an OpenMP loop) */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      ssize_t
        i;

      /* copy only channels both images define and the destination updates */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            ((traits & UpdatePixelTrait) == 0) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImage() dereferences an image, deallocating memory associated with
%  the image if the reference count becomes zero.
%
%  The format of the DestroyImage method is:
%
%      Image *DestroyImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  /* refcount is decremented under the image semaphore; only the thread
     that drops it to zero proceeds to tear the image down */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  /* invert the signature so stale pointers fail the signature asserts */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageInfo() deallocates memory associated with an ImageInfo
%  structure.
%
%  The format of the DestroyImageInfo method is:
%
%      ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* release each owned string member that was set */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /* invert the signature so stale pointers fail the signature asserts */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if
%  the blob of the specified image is referenced by other images.  If the
%  reference count is higher than 1 a new blob is assigned to the specified
%  image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* environment override for synchronous file writes */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,const PixelMask type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* no image without the requested mask channel: return NULL, not an error */
  switch (type)
  {
    case ReadPixelMask:
    {
      if ((image->channels & ReadMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    case WritePixelMask:
    {
      if ((image->channels & WriteMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    default:
    {
      if ((image->channels & CompositeMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
  }
  /* the mask is materialized as a new grayscale image of equal extent */
  mask_image=AcquireImage((ImageInfo *) NULL,exception);
  status=SetImageExtent(mask_image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(mask_image));
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e R e f e r e n c e C o u n t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* read under the semaphore for a consistent snapshot */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument.  Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  const char
    *p;

  ssize_t
    field_width,
    offset;

  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  /* scan each '%' specifier in the format; '%%' is a literal percent */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene specifier, e.g. "%03d": format `value` in place.
          The specifier is temporarily NUL-terminated so it can be used
          as a printf format, then the tail is re-appended.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MagickPathExtent-(p-format-offset)),p,value);
        /* NOTE(review): offset tracks how far the expanded text has drifted
           from the format positions; the constant 4 presumably matches the
           default expansion width -- confirm against callers */
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        char
          *r;

        ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /* extract the bracketed key, honoring nested '[' ... ']' */
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* only "%[filename:...]" is substituted here */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /* resolve the key: image property, then artifact, then option */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),option,(size_t)
          (MagickPathExtent-(p-format-offset)));
        /* "%[" + "]" plus the key are replaced by the option value */
        offset+=strlen(pattern)-strlen(option)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  else
    /* collapse any remaining '%%' escapes to a single '%' */
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
%  0..65535).
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* without HDRI, quanta are integral by construction */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* status stays MagickTrue while every sample is integral and in range */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* status==MagickFalse means an out-of-range sample was found => HDR */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* every image in the list must carry a live signature */
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MagickPathExtent],
    filename[MagickPathExtent];

  const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  /* the sequence is tainted if any frame is flagged, or any frame's magick
     or filename differs from the head of the list */
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone
%  of the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  /* sole owner: nothing to do */
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      /*
        Fix: if the clone fails, keep the caller's reference intact and
        report failure; previously *image was set to NULL and the original's
        reference count was decremented while still returning MagickTrue.
      */
      return(MagickFalse);
    }
  /* drop our share of the original and hand the caller the private clone */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const PixelInfo *background,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const PixelInfo *background, ExceptionInfo *exception) { CacheView *image_view; Image *image; MagickBooleanType status; ssize_t y; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickCoreSignature); assert(background != (const PixelInfo *) NULL); image=AcquireImage(image_info,exception); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->alpha_trait=background->alpha_trait; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. 
%
%  The format of the ReferenceImage method is:
%
%      Image *ReferenceImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* the count is guarded by the image semaphore so concurrent
     Reference/Destroy calls stay consistent */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePage() resets the image page canvas and position.
%
%  The format of the ResetImagePage method is:
%
%      MagickBooleanType ResetImagePage(Image *image,const char *page)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* a lone width implies a square page */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /* relative (aspect) geometry: offsets adjust the current position */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /* absolute geometry: offsets replace the current position and may grow
         an unset page dimension to cover the image */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePixels() resets the image pixels, that is, all the pixel
%  components are zeroed.
%
%  The format of the ResetImagePixels method is:
%
%      MagickBooleanType ResetImagePixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  size_t
    length;

  ssize_t
    y;

  void
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Fast path: the whole pixel cache is in memory, zero it in one call.
      */
      (void) memset(pixels,0,length);
      return(MagickTrue);
    }
  /*
    Slow path: zero the pixels row by row through the cache views.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum));
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e A l p h a                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlpha() sets the alpha levels of the image.
%
%  The format of the SetImageAlpha method is:
%
%      MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha: the level of transparency: 0 is fully transparent and
%      QuantumRange is fully opaque.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* alpha becomes a participating channel from here on */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* only touch pixels the write mask leaves writable */
      if (GetPixelWriteMask(image,q) > (QuantumRange/2))
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e B a c k g r o u n d C o l o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageBackgroundColor() initializes the image pixels to the image
%  background color.  The background color is defined by the background_color
%  member of the image structure.
%
%  The format of the SetImageBackgroundColor method is:
%
%      MagickBooleanType SetImageBackgroundColor(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* enable the alpha channel when the background carries alpha but the
     image does not yet */
  if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
  /* translate the background color into the image's own colorspace/traits */
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l M a s k                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannelMask() sets the image channel mask from the specified channel
%  mask.
%
%  The format of the SetImageChannelMask method is:
%
%      ChannelType SetImageChannelMask(Image *image,
%        const ChannelType channel_mask)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /* thin wrapper; returns the previous channel mask */
  return(SetPixelChannelMask(image,channel_mask));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() sets the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color: the image color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  /* the canvas adopts the pixel traits of the requested color */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e S t o r a g e C l a s s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageStorageClass() sets the image class: DirectClass for true color
%  images or PseudoClass for colormapped images.
%
%  The format of the SetImageStorageClass method is:
%
%      MagickBooleanType SetImageStorageClass(Image *image,
%        const ClassType storage_class,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o storage_class: The image class.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image->storage_class=storage_class;
  /* the pixel cache must be re-synchronized after a class change */
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: The image width in pixels.
%
%    o rows: The image height in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  image->columns=columns;
  image->rows=rows;
  /* clamp a nonsensical depth into the supported range, warning the caller */
  if (image->depth == 0)
    {
      image->depth=8;
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  if (image->depth > (8*sizeof(MagickSizeType)))
    {
      image->depth=8*sizeof(MagickSizeType);
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, 'ps:image' returns PS indicating a Postscript image.
%  JPEG is returned for this filename: 'image.jpg'.  The filename prefix has
%  precedence over the suffix.  Use an optional index enclosed in brackets
%  after a file name to specify a desired scene of a multi-resolution image
%  format like Photo CD (e.g. img0001.pcd[4]).  A True (non-zero) return value
%  indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    path[MagickPathExtent],
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  const char
    *p;

  ssize_t
    count;

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          /* not a scene list; treat a geometry spec as an extract region */
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene numbers/ranges (e.g. 1-3,5)
            into the overall first scene and scene count.
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
  if (*component != '\0')
    {
      /*
        Base path sans any compression extension.
      */
      GetPathComponent(image_info->filename,BasePathSansCompressExtension,path);
      GetPathComponent(path,ExtensionPath,component);
    }
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if ((*component != '\0') && (IsGlob(component) == MagickFalse))
    {
      MagickFormatType
        format_type;

      ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,component,MagickPathExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        /* first-character check is a cheap pre-filter before full compare */
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,component);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to seekable temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireQuantumMemory(1,magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) memset(magick,0,magick_size);
      count=ReadBlob(image,magick_size,magick);
      /* rewind so a subsequent reader sees the magic bytes again */
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic cache.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the
            extension.
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->magick_module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o B l o b                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoBlob() sets the image info blob member.
%
%  The format of the SetImageInfoBlob method is:
%
%      void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
%        const size_t length)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o blob: the blob.
%
%    o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* the blob is borrowed, not copied; the caller retains ownership */
  image_info->blob=(void *) blob;
  image_info->length=length;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o C u s t o m S t r e a m                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoCustomStream() sets the image info custom stream handlers.
%
%  The format of the SetImageInfoCustomStream method is:
%
%      void SetImageInfoCustomStream(ImageInfo *image_info,
%        CustomStreamInfo *custom_stream)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoFile() sets the image info file member.
%
%  The format of the SetImageInfoFile method is:
%
%      void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o file: the file.
% */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const PixelMask type, % const Image *mask,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o mask: the image mask. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type, const Image *mask,ExceptionInfo *exception) { CacheView *mask_view, *image_view; MagickBooleanType status; ssize_t y; /* Set image mask. 
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask == (const Image *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; mask_view=AcquireVirtualCacheView(mask,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(mask,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity; intensity=0.0; if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows)) intensity=GetPixelIntensity(mask,p); switch (type) { case ReadPixelMask: { SetPixelReadMask(image,ClampToQuantum(intensity),q); break; } case WritePixelMask: { 
SetPixelWriteMask(image,ClampToQuantum(intensity),q); break; } default: { SetPixelCompositeMask(image,ClampToQuantum(intensity),q); break; } } p+=GetPixelChannels(mask); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e R e g i o n M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageRegionMask() associates a mask with the image as defined by the % specified region. % % The format of the SetImageRegionMask method is: % % MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type, % const RectangleInfo *region,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o geometry: the mask region. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageRegionMask(Image *image, const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; /* Set image mask as defined by the region. 
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (region == (const RectangleInfo *) NULL)
    {
      /* no region: clear the matching mask channel flag and resync */
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          break;
        }
        default:
        {
          image->channels=(ChannelType) (image->channels &
            ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  /* enable the matching mask channel flag */
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  image->mask_trait=UpdatePixelTrait;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /* inside the region the mask is 0, outside it is QuantumRange */
      pixel=QuantumRange;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=(Quantum) 0;
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
%  image and returns the previous setting.  A virtual pixel is any pixel access
%  that is outside the boundaries of the image cache.
%
%  The format of the SetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now effects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SmushXGap(): compute how far the current image may be slid left into the
  transparent right margin of its previous (left) neighbor.  Returns the
  measured gap minus the requested offset, or the offset itself as a
  fallback (see note below).
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* the first image in the list has no left neighbor: nothing to smush */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* scan right-to-left: count transparent columns on the left image's
       right edge for this row (stops at an opaque pixel or at the current
       gap bound) */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* scan left-to-right: count transparent columns on the right image's
       left edge for this row */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    /* the gap for the pair is the minimum combined margin over all rows */
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): the outer loop has no break, so y should always equal
     smush_image->rows here; this guard appears to be defensive only */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/*
  SmushYGap(): vertical analog of SmushXGap() — compute how far the current
  image may be slid up into the transparent bottom margin of its previous
  (top) neighbor.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /* the first image in the list has no top neighbor: nothing to smush */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* count transparent rows on the top image's bottom edge for this column */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* count transparent rows on the bottom image's top edge for this column */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): defensive guard — the outer loop has no break, so x
     should always equal smush_image->columns here */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* enable blending in the result if any member carries alpha */
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* top-to-bottom: widest member sets the width, heights accumulate */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* left-to-right: widths accumulate, tallest member sets the height */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        /* pull the image up into its predecessor's transparent margin */
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        /* pull the image left into its predecessor's transparent margin */
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* trim the canvas to the extent actually covered by the composites */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* exception is unused here; kept for API symmetry with sibling methods */
  (void) exception;
  /* drop every embedded profile (ICC, EXIF, XMP, ...) */
  DestroyImageProfiles(image);
  /* drop the comment and creation/modification date properties */
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* ask the PNG encoder to omit the listed ancillary chunks on write */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  PushColormapIndex(): validate a colormap index.  A valid index is returned
  unchanged; an out-of-range index sets *range_exception and index 0 is
  substituted so the caller can continue safely.
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  if ((size_t) index < image->colors)
    return(index);
  *range_exception=MagickTrue;
  return((Quantum) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* ping mode carries no pixels to synchronize */
  if (image->ping != MagickFalse)
    return(MagickTrue);
  /* only colormapped (PseudoClass) images have indexes to resolve */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* remember the taint flag: rewriting pixels below must not mark the
     image as modified */
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* another row already failed; skip the remaining work for this row */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* replace each pixel with the colormap entry its index selects */
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e S e t t i n g s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs any image_info global options into per-image
%  attributes.
%
%  Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
%  that operations and coders can find such settings.  In IMv7 if a desired
%  per-image artifact is not set, then it will directly look for a global
%  option as a fallback, as such this copy is no longer needed, only the
%  link set up.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Apply SyncImageSettings() to every image in the list, then drop the
   one-shot "page" option so it is not re-applied to later reads. */
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image,exception);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /* rho is x; sigma is y, defaulting to x when only one value given */
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
      exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* an explicit image_info quality overrides the "quality" option above */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if
  (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /* convert the stored resolution when the unit system changes */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* round to two decimal places */
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      /* a "density" option expressed in the new units wins outright */
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          image->resolution.x=geometry_info.rho;
          image->resolution.y=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            image->resolution.y=image->resolution.x;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /*
    Pointer to allow the lookup of pre-image artifact will fallback to a
    global option setting/define.  This saves a lot of duplication of
    global options into per-image artifacts, while ensuring only specifically
    set per-image artifacts are preserved when parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
GB_unop__identity_bool_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel; fix defects in the Generator/ template,
// not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_fc32)
// op(A') function:  GB (_unop_tran__identity_bool_fc32)

// C type:   bool
// A type:   GxB_FC32_t
// cast:     bool cij = (crealf (aij) != 0) || (cimagf (aij) != 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: a complex value is "true" iff either component is nonzero
#define GB_CAST(z, aij) \
    bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ; \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_bool_fc32)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_bool_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
reduction.c
#include <stdio.h>

#define N   1000000ll
#define SUM (N * (N-1)/2)

/*
 * Sanity test for OpenMP target offload: sum the integers 0..N-1 inside a
 * `target parallel` region using a `for reduction(+:...)` and compare the
 * result against the closed-form value SUM = N*(N-1)/2.
 */
int main (void)
{
  long long total, idx;

#pragma omp target parallel shared(total) private(idx)
  {
    /* exactly one thread zeroes the accumulator; the barrier makes every
       thread wait until that write is visible */
#pragma omp master
    total = 0;
#pragma omp barrier

    /* each thread sums its share; the reduction clause combines the
       per-thread partial sums into `total` */
#pragma omp for reduction(+:total)
    for (idx = 0; idx < N; idx++)
      total += idx;

    // The Sum shall be sum:[0:N]
#pragma omp single
    {
      if (total == SUM)
        printf ("The result is correct = %lld!\n", total);
      else
        printf ("Incorrect result = %lld, expected = %lld!\n", total, SUM);
    }
  }
  return 0;
}
GB_binop__second_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel for the SECOND operator (z = y) on
// bool; fix defects in the Generator/ template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__second_bool)
// A.*B function (eWiseMult):       GB (_AemultB_08__second_bool)
// A.*B function (eWiseMult):       GB (_AemultB_02__second_bool)
// A.*B function (eWiseMult):       GB (_AemultB_04__second_bool)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__second_bool)
// A*D function (colscale):         GB (_AxD__second_bool)
// D*A function (rowscale):         GB (_DxB__second_bool)
// C+=B function (dense accum):     GB (_Cdense_accumB__second_bool)
// C+=b function (dense accum):     GB (_Cdense_accumb__second_bool)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__second_bool)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   bool
// A type:   bool
// A pattern? 1
// B type:   bool
// B pattern? 0

// BinaryOp: cij = bij

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// true if values of A are not used
#define GB_A_IS_PATTERN \
    1 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    bool bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = y ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    1

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SECOND || GxB_NO_BOOL || GxB_NO_SECOND_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__second_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__second_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    bool alpha_scalar ;
    bool beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion supplies fill values for entries present in only one input
        alpha_scalar = (*((bool *) alpha_scalar_in)) ;
        beta_scalar = (*((bool *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__second_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__second_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__second_bool)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ;
p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
regex-dna.c
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // Based on C contribution of Mike Pall // Contributed by The Anh Tran /* http://benchmarksgame.alioth.debian.org/u64q/program.php?test=regexdna&lang=gcc&id=4 usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp regexdna.gcc-4.c -o regexdna.gcc-4.gcc_run -lpcre ./regexdna.gcc-4.gcc_run 0 < regexdna-input5000000.txt */ #define _GNU_SOURCE #include <omp.h> #include <sched.h> #include <pcre.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <memory.h> // read all redirected data from stdin // strip DNA headers and newline characters char* ReadInput_StripHeader( size_t *file_size, size_t *strip_size ) { // get input size *file_size = ftell(stdin); fseek(stdin, 0, SEEK_END); *file_size = ftell(stdin) - *file_size; fseek(stdin, 0, SEEK_SET); *strip_size = 0; // load original content into memory char* input = (char*)malloc(*file_size +1); assert(input != 0); { size_t sz = fread(input, 1, *file_size, stdin); assert(sz == *file_size); input[*file_size] = 0; } // alloc space for regex_replace char* output = (char*)malloc(*file_size); assert(output != 0); const char* re_error; int re_erroff; // compile pattern pcre* re = pcre_compile(">.*\\n|\\n", 0, &re_error, &re_erroff, 0); pcre_extra* re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position; int match[3]; // regex_replace for( position = 0; pcre_exec(re, re_extra, input, *file_size, position, 0, match, 3) >= 0; position = match[1] ) { int char_to_copy = match[0] - position; memcpy(output + (*strip_size), input + position, char_to_copy); *strip_size += char_to_copy; } // copy remain part int char_to_copy = *file_size - position; memcpy(output + (*strip_size), input + position, char_to_copy); *strip_size += char_to_copy; free(input); pcre_free(re_extra); pcre_free(re); return output; } void Count_Patterns(char const* input, size_t input_len, char* result) { static char const* ptns[] = 
{ "agggtaaa|tttaccct", "[cgt]gggtaaa|tttaccc[acg]", "a[act]ggtaaa|tttacc[agt]t", "ag[act]gtaaa|tttac[agt]ct", "agg[act]taaa|ttta[agt]cct", "aggg[acg]aaa|ttt[cgt]ccct", "agggt[cgt]aa|tt[acg]accct", "agggta[cgt]a|t[acg]taccct", "agggtaa[cgt]|[acg]ttaccct" }; static const int n_ptns = sizeof(ptns) / sizeof(ptns[0]); static size_t counters[9]; int i; #pragma omp for schedule(dynamic, 1) nowait for (i = 0; i < n_ptns; ++i) { const char* re_error = 0; int re_erroff = 0; pcre* re = pcre_compile(ptns[i], 0, &re_error, &re_erroff, 0); pcre_extra* re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position, count; int match[3]; // regex_search for( position = count = 0; pcre_exec(re, re_extra, input, input_len, position, 0, match, 3) >= 0; position = match[1] ) ++count; counters[i] = count; pcre_free(re_extra); pcre_free(re); } // we want the last thread, reaching this code block, to print result static size_t thread_passed = 0; if (__sync_add_and_fetch(&thread_passed, 1) == (size_t)omp_get_num_threads() ) { int plen = 0; int i; for (i = 0; i < n_ptns; ++i) plen += sprintf(result + plen, "%s %d\n", ptns[i], counters[i]); thread_passed = 0; } } typedef struct IUB_T { const char* iub; int len; } IUB; IUB const iub_table[] = { {0}, {"(c|g|t)", 7}, {0}, {"(a|g|t)", 7}, {0}, {0}, {0}, {"(a|c|t)", 7}, {0}, {0}, {"(g|t)", 5}, {0}, {"(a|c)", 5}, {"(a|c|g|t)", 9}, {0}, {0}, {0}, {"(a|g)", 5}, {"(c|t)", 5}, {0}, {0}, {"(a|c|g)", 7}, {"(a|t)", 5}, {0}, {"(c|t)", 5} }; int const n_iub = sizeof(iub_table)/sizeof(iub_table[0]); void Replace_Patterns(char const* input, size_t input_len, size_t* repl_len) { #pragma omp single nowait { // input_len * 1.5 char* output = (char*)malloc(input_len + (input_len >> 1)); assert(output != 0); const char* re_error = 0; int re_erroff = 0; pcre* re = pcre_compile("[BDHKMNRSVWY]", 0, &re_error, &re_erroff, 0); pcre_extra* re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position; int match[3]; int replace_len = 0; // regex_replace 
for( position = 0; pcre_exec(re, re_extra, input, input_len, position, 0, match, 3) >= 0; position = match[1] ) { int char_to_copy = match[0] - position; memcpy(output + replace_len, input + position, char_to_copy); replace_len += char_to_copy; IUB const* i_r = iub_table + (input[match[0]] - 'A'); char_to_copy = i_r->len; memcpy(output + replace_len, i_r->iub, char_to_copy); replace_len += char_to_copy; } // copy remain part int char_to_copy = input_len - position; memcpy(output + replace_len, input + position, char_to_copy); replace_len += char_to_copy; free(output); pcre_free(re_extra); pcre_free(re); *repl_len = replace_len; } } // Detect single - multi thread benchmark int GetThreadCount() { cpu_set_t cs; int count = 0; int i; CPU_ZERO(&cs); sched_getaffinity(0, sizeof(cs), &cs); for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cs)) ++count; } return count; } int main() { size_t initial_length = 0; size_t striped_length = 0; size_t replace_length = 0; char* input = ReadInput_StripHeader (&initial_length, &striped_length); char match_result[1024]; #pragma omp parallel default(shared) num_threads(GetThreadCount()) { Count_Patterns (input, striped_length, match_result); Replace_Patterns(input, striped_length, &replace_length); } printf("%s\n%d\n%d\n%d\n", match_result, initial_length, striped_length, replace_length ); free(input); return 0; }
integrate.c
/*
 * integrate.c: Example of numerical integration in OpenMP.
 *
 * (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
 */

#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>

const double PI = 3.14159265358979323846;
const double a = -4.0;
const double b = 4.0;
const int nsteps = 40000000;

/* wtime: Current wall-clock time in seconds (microsecond resolution). */
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}

/* func: Integrand — the Gaussian exp(-x^2); its integral over R is sqrt(pi). */
double func(double x)
{
    return exp(-x * x);
}

/* integrate: Integrates by rectangle method (midpoint rule) over [a, b]
 * using n equal subintervals; evaluates func at each midpoint. */
double integrate(double (*func)(double), double a, double b, int n)
{
    const double h = (b - a) / n;
    double acc = 0.0;
    int i = 0;
    while (i < n) {
        acc += func(a + h * (i + 0.5));
        ++i;
    }
    return acc * h;
}

/* run_serial: Times the sequential integration and prints result + error
 * against the exact value sqrt(pi).  Returns the elapsed seconds. */
double run_serial()
{
    const double started = wtime();
    const double res = integrate(func, a, b, nsteps);
    const double elapsed = wtime() - started;
    printf("Result (serial): %.12f; error %.12f\n", res, fabs(res - sqrt(PI)));
    return elapsed;
}

/* integrate_omp: Same midpoint rule, parallelized by hand: each thread sums
 * its own contiguous index block (the last thread absorbs the remainder),
 * then folds its partial sum into the shared total under an atomic update. */
double integrate_omp(double (*func)(double), double a, double b, int n)
{
    const double h = (b - a) / n;
    double sum = 0.0;

#pragma omp parallel
    {
        const int nthr  = omp_get_num_threads();
        const int tid   = omp_get_thread_num();
        const int chunk = n / nthr;
        const int first = tid * chunk;
        /* last thread takes any leftover iterations */
        const int last  = (tid == nthr - 1) ? (n - 1) : (first + chunk - 1);

        double partial = 0.0;
        for (int i = first; i <= last; i++)
            partial += func(a + h * (i + 0.5));

#pragma omp atomic
        sum += partial;
    }
    return sum * h;
}

/* run_parallel: Times the OpenMP integration and prints result + error.
 * Returns the elapsed seconds. */
double run_parallel()
{
    const double started = wtime();
    const double res = integrate_omp(func, a, b, nsteps);
    const double elapsed = wtime() - started;
    printf("Result (parallel): %.12f; error %.12f\n", res, fabs(res - sqrt(PI)));
    return elapsed;
}

int main(int argc, char **argv)
{
    printf("Integration f(x) on [%.12f, %.12f], nsteps = %d\n", a, b, nsteps);

    const double tserial = run_serial();
    const double tparallel = run_parallel();

    printf("Execution time (serial): %.6f\n", tserial);
    printf("Execution time (parallel): %.6f\n", tparallel);
    printf("Speedup: %.2f\n", tserial / tparallel);
    return 0;
}
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* ceiling/floor of an integer quotient, computed in double (CLooG idiom) */
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified during the carry normalization below.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) are left uninitialized when fewer than
   * 3 (resp. 4) arguments are given — the benchmark harness always passes
   * all four, but a bare invocation reads indeterminate values.  */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A[2][Nz][Ny][Nx] (double-buffered grid) and
  // coef[7][Nz][Ny][Nx] (one coefficient array per stencil point)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 4;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables (interior points only; index-0 planes keep whatever
  // malloc returned — boundary values are irrelevant to the timing)
  //
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* (A stray glibc <stdc-predef.h> license/header comment, injected here by
     * the source-to-source tool's preprocessing, has been condensed: it
     * contained no code.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    /* NOTE(review): auto-generated time-skewed, tiled loop nest (PLUTO/CLooG).
     * Left exactly as generated — the bound expressions and the update
     * statement encode the tiling and must not be hand-edited.  */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,4);t1++) {
        lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
        ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(4*t3-Ny-252,256));t4<=min(min(min(min(floord(4*t3+Nx,256),floord(Nt+Nx-4,256)),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) {
              for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),256*t4+254),8*t1-8*t2+Nz+5);t5++) {
                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(256*t4,t5+1);
                    ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point variable-coefficient update: center plus the
                       * six face neighbours, written to the other buffer */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // NOTE(review): the leaf and middle levels are freed, but A, coef and
  // tile_size themselves are not — a harmless at-exit leak in a benchmark,
  // flagged here for completeness.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
c-tree.h
/* Definitions for C parsing and type checking.
   Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H

#include "c-common.h"
#include "toplev.h"
#include "diagnostic.h"

/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
   know how big it is.  This is sanity-checked in c-decl.c.  */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
  (sizeof (struct c_common_identifier) + 3 * sizeof (void *))

/* Language-specific declaration information.
   (GTY(()) marks types/fields for the garbage-collector type machinery.)  */

struct lang_decl GTY(())
{
  char dummy;
};

/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only.  */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)

/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile.  */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)

/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
   nonzero if the definition of the type has already started.  */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)

/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
   declarations whose type would be completed by completing that type.  */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)

/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
   keyword.  C_RID_CODE (node) is then the RID_* value of the keyword,
   and C_RID_YYCODE is the token number wanted by Yacc.  */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)

struct lang_type GTY(())
{
  /* In a RECORD_TYPE, a sorted array of the fields of the type.  */
  struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields"))) s;
  /* In an ENUMERAL_TYPE, the min and max values.  */
  tree enum_min;
  tree enum_max;
  /* In a RECORD_TYPE, information specific to Objective-C, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;
};

/* Record whether a type or decl was written with nonconstant size.
   Note that TYPE_SIZE may have simplified to a constant.  */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)

/* Record whether a typedef for type `int' was actually `signed int'.  */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)

/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
   return type.  */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)

/* For a FUNCTION_DECL, nonzero if it was an implicit declaration.  */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)

/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
   been declared.  */
#define C_DECL_DECLARED_BUILTIN(EXP)		\
  DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))

/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
   built-in prototype and does not have a non-built-in prototype.  */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
  DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))

/* Record whether a decl was declared register.  This is strictly a
   front-end flag, whereas DECL_REGISTER is used for code generation;
   they may differ for structures with volatile fields.  */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)

/* Record whether a decl was used in an expression anywhere except an
   unevaluated operand of sizeof / typeof / alignof.  This is only
   used for functions declared static but not defined, though outside
   sizeof and typeof it is set for other function decls as well.  */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))

/* Record whether a label was defined in a statement expression which
   has finished and so can no longer be jumped to.  */
#define C_DECL_UNJUMPABLE_STMT_EXPR(EXP)	\
  DECL_LANG_FLAG_6 (LABEL_DECL_CHECK (EXP))

/* Record whether a label was the subject of a goto from outside the
   current level of statement expression nesting and so cannot be
   defined right now.  */
#define C_DECL_UNDEFINABLE_STMT_EXPR(EXP)	\
  DECL_LANG_FLAG_7 (LABEL_DECL_CHECK (EXP))

/* Record whether a label was defined in the scope of an identifier
   with variably modified type which has finished and so can no longer
   be jumped to.  */
#define C_DECL_UNJUMPABLE_VM(EXP)	\
  DECL_LANG_FLAG_3 (LABEL_DECL_CHECK (EXP))

/* Record whether a label was the subject of a goto from outside the
   current level of scopes of identifiers with variably modified type
   and so cannot be defined right now.  */
#define C_DECL_UNDEFINABLE_VM(EXP)	\
  DECL_LANG_FLAG_5 (LABEL_DECL_CHECK (EXP))

/* Record whether a variable has been declared threadprivate by
   #pragma omp threadprivate.  */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))

/* Nonzero for a decl which either doesn't exist or isn't a prototype.
   N.B. Could be simplified if all built-in decls had complete prototypes
   (but this is presently difficult because some of them need FILE*).  */
#define C_DECL_ISNT_PROTOTYPE(EXP)			\
       (EXP == 0					\
	|| (TYPE_ARG_TYPES (TREE_TYPE (EXP)) == 0	\
	    && !DECL_BUILT_IN (EXP)))

/* For FUNCTION_TYPE, a hidden list of types of arguments.  The same as
   TYPE_ARG_TYPES for functions with prototypes, but created for functions
   without prototypes.  */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)

/* Record parser information about an expression that is irrelevant
   for code generation alongside a tree representing its value.  */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, or ERROR_MARK for other expressions (including
     parenthesized expressions).  */
  enum tree_code original_code;
};

/* A kind of type specifier.  Note that this information is currently
   only used to distinguish tag definitions, tag references and typeof
   uses.  */
enum c_typespec_kind {
  /* A reserved keyword type specifier.  */
  ctsk_resword,
  /* A reference to a tag, previously declared, such as "struct foo".
     This includes where the previous declaration was as a different
     kind of tag, in which case this is only valid if shadowing that
     tag in an inner scope.  */
  ctsk_tagref,
  /* A reference to a tag, not previously declared in a visible
     scope.  */
  ctsk_tagfirstref,
  /* A definition of a tag such as "struct foo { int a; }".  */
  ctsk_tagdef,
  /* A typedef name.  */
  ctsk_typedef,
  /* An ObjC-specific kind of type specifier.  */
  ctsk_objc,
  /* A typeof specifier.  */
  ctsk_typeof
};

/* A type specifier: this structure is created in the parser and
   passed to declspecs_add_type only.  */
struct c_typespec {
  /* What kind of type specifier this is.  */
  enum c_typespec_kind kind;
  /* The specifier itself.  */
  tree spec;
};

/* A storage class specifier.  */
enum c_storage_class {
  csc_none,
  csc_auto,
  csc_extern,
  csc_register,
  csc_static,
  csc_typedef
};

/* A type specifier keyword "void", "_Bool", "char", "int", "float",
   "double", or none of these.  */
enum c_typespec_keyword {
  cts_none,
  cts_void,
  cts_bool,
  cts_char,
  cts_int,
  cts_float,
  cts_double,
  cts_dfloat32,
  cts_dfloat64,
  cts_dfloat128
};

/* A sequence of declaration specifiers in C.  */
struct c_declspecs {
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  */
  enum c_typespec_keyword typespec_word;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether a type specifier has been seen.  */
  BOOL_BITFIELD type_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether a struct, union or enum type either had its content
     defined by a type specifier in the list or was the first visible
     declaration of its tag.  */
  BOOL_BITFIELD tag_defined_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* APPLE LOCAL begin "unavailable" attribute (radar 2809697) */
  /* Whether the specifiers include a unavailable typedef.  */
  BOOL_BITFIELD unavailable_p : 1;
  /* APPLE LOCAL end "unavailable" attribute (radar 2809697) */
  /* APPLE LOCAL begin private extern */
  /* Whether the specifiers include __private_extern.  */
  BOOL_BITFIELD private_extern_p : 1;
  /* APPLE LOCAL end private extern */
  /* APPLE LOCAL CW asm blocks */
  BOOL_BITFIELD iasm_asm_specbit : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.
     NOTE(review): declared without an explicit ": 1" width, unlike its
     siblings — kept as-is since layout is an upstream decision.  */
  BOOL_BITFIELD default_int_p;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "__thread" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
};

/* The various kinds of declarators in C.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};

/* Information about the parameters in a function declarator.  */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined.  */
  tree tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A list of VLA sizes from the parameters.  In a function
     definition, these are used to ensure that side-effects in sizes
     of arrays converted to pointers (such as a parameter int i[n++])
     take place; otherwise, they are ignored.  */
  tree pending_sizes;
  /* True when these arguments had [*].  */
  BOOL_BITFIELD had_vla_unspec : 1;
};

/* A declarator.  */
struct c_declarator {
  /* The kind of declarator.  */
  enum c_declarator_kind kind;
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.  */
  struct c_declarator *declarator;
  location_t id_loc; /* Currently only set for cdk_id. */
  union {
    /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
       declarator.  */
    tree id;
    /* For functions.  */
    struct c_arg_info *arg_info;
    /* For arrays.  */
    struct {
      /* The array dimension, or NULL for [] and [*].  */
      tree dimen;
      /* The qualifiers inside [].  */
      int quals;
      /* The attributes (currently ignored) inside [].  */
      tree attrs;
      /* Whether [static] was used.  */
      BOOL_BITFIELD static_p : 1;
      /* Whether [*] was used.  */
      BOOL_BITFIELD vla_unspec_p : 1;
    } array;
    /* For pointers, the qualifiers on the pointer type.  */
    int pointer_quals;
    /* For attributes.  */
    tree attrs;
  } u;
};

/* A type name.  */
struct c_type_name {
  /* The declaration specifiers.  */
  struct c_declspecs *specs;
  /* The declarator.  */
  struct c_declarator *declarator;
};

/* A parameter.  */
struct c_parm {
  /* The declaration specifiers, minus any prefix attributes.  */
  struct c_declspecs *specs;
  /* The attributes.  */
  tree attrs;
  /* The declarator.  */
  struct c_declarator *declarator;
};

/* Save and restore the variables in this file and elsewhere
   that keep track of the progress of compilation of the current function.
   Used for nested functions.  */
struct language_function GTY(())
{
  struct c_language_function base;
  tree x_break_label;
  tree x_cont_label;
  struct c_switch * GTY((skip)) x_switch_stack;
  struct c_arg_info * GTY((skip)) arg_info;
  int returns_value;
  int returns_null;
  int returns_abnormally;
  int warn_about_return_type;
  /* APPLE LOCAL begin mainline 4.3 2006-10-31 4134307 */
  /* APPLE LOCAL end mainline 4.3 2006-10-31 4134307 */
};

/* Save lists of labels used or defined in particular contexts.
   Allocated on the parser obstack.  */
struct c_label_list
{
  /* The label at the head of the list.  */
  tree label;
  /* The rest of the list.  */
  struct c_label_list *next;
};

/* Statement expression context.  */
struct c_label_context_se
{
  /* The labels defined at this level of nesting.  */
  struct c_label_list *labels_def;
  /* The labels used at this level of nesting.  */
  struct c_label_list *labels_used;
  /* The next outermost context.  */
  struct c_label_context_se *next;
};

/* Context of variably modified declarations.  */
struct c_label_context_vm
{
  /* The labels defined at this level of nesting.  */
  struct c_label_list *labels_def;
  /* The labels used at this level of nesting.  */
  struct c_label_list *labels_used;
  /* The scope of this context.  Multiple contexts may be at the same
     numbered scope, since each variably modified declaration starts a
     new context.  */
  unsigned scope;
  /* The next outermost context.  */
  struct c_label_context_vm *next;
};

/* in c-parser.c */
extern void c_parse_init (void);

/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);

/* in c-decl.c */
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;

extern int global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void insert_block (tree);
extern void c_expand_body (tree);

extern void c_init_decl_processing (void);
extern void c_dup_lang_specific_decl (tree);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (tree, struct c_declspecs *,
						    bool, bool);
extern tree build_enumerator (tree, tree);
extern tree check_for_loop_decls (void);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (tree, location_t);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern void c_maybe_initialize_eh (void);
extern void finish_decl (tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (tree, tree, tree);
extern struct c_arg_info *get_parm_info (bool);
extern tree grokfield (struct c_declarator *, struct c_declspecs *, tree);
extern tree groktypename (struct c_type_name *);
extern tree grokparm (const struct c_parm *);
extern tree implicitly_declare (tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (struct function *);
extern void c_pop_function_context (struct function *);
extern void push_parm_decl (const struct c_parm *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
							struct c_declarator *,
							bool);
extern tree builtin_function (const char *, tree, int, enum built_in_class,
			      const char *, tree);
/* NOTE(review): this declaration continues past the visible chunk — left
   exactly as it appears here.  */
extern void shadow_tag (const struct c_declspecs
*); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (tree); extern int start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct (enum tree_code, tree); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (enum tree_code, tree); extern int c_expand_decl (tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct c_declspecs *declspecs_add_qual (struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_type (struct c_declspecs *, struct c_typespec); extern struct c_declspecs *declspecs_add_scspec (struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (struct c_declspecs *, tree); extern struct c_declspecs *finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern int c_disregard_inline_limits (tree); extern int c_cannot_inline_tree_fn (tree *); extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern tree c_objc_common_truthvalue_conversion (tree expr); extern bool c_warn_unused_global_decl (tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); #define c_build_type_variant(TYPE, CONST_P, VOLATILE_P) \ c_build_qualified_type ((TYPE), \ ((CONST_P) ? TYPE_QUAL_CONST : 0) | \ ((VOLATILE_P) ? 
TYPE_QUAL_VOLATILE : 0)) /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern struct c_switch *c_switch_stack; extern struct c_label_context_se *label_context_stack_se; extern struct c_label_context_vm *label_context_stack_vm; extern tree require_complete_type (tree); extern int same_translation_unit_p (tree, tree); extern int comptypes (tree, tree); extern bool c_vla_type_p (tree); extern bool c_mark_addressable (tree); extern void c_incomplete_type_error (tree, tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (struct c_expr); extern tree composite_type (tree, tree); extern tree build_component_ref (tree, tree); extern tree build_array_ref (tree, tree); extern tree build_external_ref (tree, int, location_t); extern void pop_maybe_used (bool); extern struct c_expr c_expr_sizeof_expr (struct c_expr); extern struct c_expr c_expr_sizeof_type (struct c_type_name *); extern struct c_expr parser_build_unary_op (enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op (enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr (tree, tree, tree); extern tree build_compound_expr (tree, tree); extern tree c_cast_expr (struct c_type_name *, tree); extern tree build_c_cast (tree, tree); extern void store_init_value (tree, tree); extern void error_init (const char *); extern void pedwarn_init (const char *); extern void maybe_warn_string_init (tree, struct c_expr); extern void start_init (tree, tree, int); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void push_init_level (int); extern struct c_expr pop_init_level (int); extern void set_init_index (tree, tree); extern void set_init_label (tree); extern void process_init_element (struct c_expr); extern tree build_compound_literal (tree, tree); extern tree c_start_case (tree); extern void c_finish_case (tree); extern tree build_asm_expr (tree, tree, 
tree, tree, bool); extern tree build_asm_stmt (tree, tree); extern tree c_convert_parm_for_inlining (tree, tree, tree, int); extern int c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt (tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree, bool); /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, tree, bool); /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (tree); extern tree c_process_expr_stmt (tree); extern tree c_finish_expr_stmt (tree); extern tree c_finish_return (tree); extern tree c_finish_bc_stmt (tree *, bool); extern tree c_finish_goto_label (tree); extern tree c_finish_goto_ptr (tree); extern void c_begin_vm_scope (unsigned int); extern void c_end_vm_scope (unsigned int); extern tree c_expr_to_decl (tree, bool *, bool *, bool *); extern tree c_begin_omp_parallel (void); extern tree c_finish_omp_parallel (tree, tree); extern tree c_finish_omp_clauses (tree); /* APPLE LOCAL begin CW asm blocks */ extern tree get_structure_offset (tree, tree); extern tree lookup_struct_or_union_tag (tree); /* APPLE LOCAL end CW asm blocks */ /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* Nonzero means we are reading code that came from a system header file. */ extern int system_header_p; /* True means global_bindings_p should return false even if the scope stack says we are in file scope. 
*/ extern bool c_override_global_bindings_to_false; /* True means we've initialized exception handling. */ extern bool c_eh_initialized_p; /* In c-decl.c */ extern void c_finish_incomplete_decl (tree); extern void c_write_global_declarations (void); /* APPLE LOCAL radar 5741070 */ extern tree c_return_interface_record_type (tree); /* In order for the format checking to accept the C frontend diagnostic framework extensions, you must include this file before toplev.h, not after. */ #if GCC_VERSION >= 4001 #define ATTRIBUTE_GCC_CDIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m ,n))) ATTRIBUTE_NONNULL(m) #else #define ATTRIBUTE_GCC_CDIAG(m, n) ATTRIBUTE_NONNULL(m) #endif extern void pedwarn_c90 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2); extern void pedwarn_c99 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2); #endif /* ! GCC_C_TREE_H */
openmpi.c
/*filename: mpi_egkefaliko_test.c , original file mpi_heat2D.c*/
/*
 * 2-D heat diffusion over an MPI cartesian grid with OpenMP threading
 * inside each rank.  Each rank owns a (local_size_y x local_size_x) tile
 * padded by a one-cell halo on every side; halos are exchanged with the
 * four cartesian neighbours through persistent requests while the tile
 * interior is updated, overlapping communication with computation.
 * Two buffers (current/future) are flipped every time step.
 */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "mpi_parallelio.h"
#include "grid_sizes.h"                              /* Includes sizes */

#define MAX_TEMP (X_SIZE*Y_SIZE)*(X_SIZE*Y_SIZE)/8  /* bounds for the */
#define MIN_TEMP 10                                  /* rand function  */
#define TIMESTEPS 100    /* number of "update" iterations */
#define ERROR_CODE -666  /* label for defining errors */
#define OK 1             /* label for defining that everything went OK :) */
#define NORTH 0          /* indicates       */
#define SOUTH 1          /* the coordinates */
#define WEST 2           /* of neighbouring */
#define EAST 3           /* processes       */
#define CONV_ERROR .05f
#define MASTER 0

/* Parametres for temperature equation */
struct Parms {
  float cx;
  float cy;
} parms = {0.1, 0.1};

MPI_Comm MPI_CART_COMM;  /* communicator carrying the 2-D cartesian topology */

/* start of functions prototypes */
int init_array(float** array, int y_size, int x_size, int neighbors[4]);
void prtdat(int nx, int ny, float *u1, char *fnam);
void print_array(float* array, int y_size, int x_size);
void update(int start_x, int end_x, int start_y, int end_y, int y_size,
            float *u1, float *u2);
void update_canvas(int left_border_x, int right_border_x, int up_border_y,
                   int down_border_y, int x_size, int neighbors[4],
                   float *u1, float *u2);
/* end of functions prototypes */

int my_rank;

int main(void){
  int rank_size;                   /* number of ranks in MPI_COMM_WORLD */
  int local_size_x, local_size_y;  /* local tile dimensions (without halo) */
  int processor_scheme[2] = {0,0}; /* ranks per dimension, filled by MPI_Dims_create */
  int my_coords[2];                /* this rank's (y,x) position in the grid */
  int iz;                          /* double-buffer selector: 0 or 1 */
  int provided;                    /* thread level granted by MPI_Init_thread */
  float* sub_array;                /* both padded buffers, allocated back to back */
  int periods[2] = {0,0};          /* non-periodic grid: avoid data circulation */
  int neighbors[4];                /* N/S/W/E neighbour ranks (or MPI_PROC_NULL) */
  MPI_Request sending_requests[2][4], receiving_requests[2][4]; /* persistent requests, one set per buffer */
  double time_start, time_end;     /* variables to save starting and ending time */
  MPI_Datatype vertical_vector, vertical_vector_temp,
               horizontal_vector, horizontal_vector_temp; /* column/row halo datatypes */

  MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &provided); /* initialize   */
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);                     /* the          */
  MPI_Comm_size(MPI_COMM_WORLD, &rank_size);                   /* mpi_enviroment */

  if(my_rank == 0){
    printf("Provided %d\n",provided);
  }

  MPI_Barrier(MPI_COMM_WORLD); /* sychronize all processors before starting computing time needed */
  time_start = MPI_Wtime();

  MPI_Dims_create(rank_size, 2, processor_scheme); /* how the ranks are organised over the heating table */
  MPI_Cart_create(MPI_COMM_WORLD, 2, processor_scheme, periods, 1, &MPI_CART_COMM);
  /* BUG FIX: reorder==1 above lets MPI renumber the ranks, so the world
     rank may differ from the cartesian rank; refresh my_rank from the
     cartesian communicator before any cartesian query uses it. */
  MPI_Comm_rank(MPI_CART_COMM, &my_rank);
  MPI_Cart_coords(MPI_CART_COMM, my_rank, 2, my_coords);
  MPI_Cart_shift(MPI_CART_COMM, 0, 1, &neighbors[NORTH], &neighbors[SOUTH]); /* Y axis */
  MPI_Cart_shift(MPI_CART_COMM, 1, 1, &neighbors[WEST],  &neighbors[EAST]);  /* X axis */

  /* Decide how much load every process will get: the first (size % ranks)
     ranks along each dimension absorb one extra row/column. */
  local_size_y = Y_SIZE / processor_scheme[0] +
                 ((my_coords[0] < Y_SIZE % processor_scheme[0]) ? 1 : 0);
  local_size_x = X_SIZE / processor_scheme[1] +
                 ((my_coords[1] < X_SIZE % processor_scheme[1]) ? 1 : 0);

  if(init_array(&sub_array, local_size_y, local_size_x, neighbors) == ERROR_CODE) {
    MPI_Abort(MPI_COMM_WORLD, ERROR_CODE);
    exit(1);
  }

  /* Initializing datatypes */
  MPI_Type_vector(local_size_y, 1, local_size_x+2, MPI_FLOAT,
                  &vertical_vector_temp);              /* one tile column as one entity */
  MPI_Type_create_resized(vertical_vector_temp, 0, sizeof(float),
                          &vertical_vector);           /* Resize as one float */
  MPI_Type_commit(&vertical_vector);
  MPI_Type_contiguous(local_size_x, MPI_FLOAT,
                      &horizontal_vector_temp);        /* one tile row as one entity */
  MPI_Type_create_resized(horizontal_vector_temp, 0, sizeof(float),
                          &horizontal_vector);         /* Resize as one float */
  MPI_Type_commit(&horizontal_vector);

  MPI_Barrier(MPI_COMM_WORLD);

  float *current_array, *future_array;
  int time_step, array_size = (local_size_x+2)*(local_size_y+2);
  current_array = sub_array;
  future_array  = sub_array + array_size;

#define GET_OFFSET(y_pos,x_pos) ( (x_pos) + (y_pos)*(local_size_x+2) )

  /* Persistent receives into the halo rows/columns, one set per buffer. */
  MPI_Recv_init(current_array + GET_OFFSET(local_size_y+1,1), 1, horizontal_vector,
                neighbors[SOUTH], SOUTH, MPI_CART_COMM, &receiving_requests[0][SOUTH]);
  MPI_Recv_init(current_array + GET_OFFSET(0,1), 1, horizontal_vector,
                neighbors[NORTH], NORTH, MPI_CART_COMM, &receiving_requests[0][NORTH]);
  MPI_Recv_init(current_array + GET_OFFSET(1,local_size_x+1), 1, vertical_vector,
                neighbors[EAST], EAST, MPI_CART_COMM, &receiving_requests[0][EAST]);
  MPI_Recv_init(current_array + GET_OFFSET(1,0), 1, vertical_vector,
                neighbors[WEST], WEST, MPI_CART_COMM, &receiving_requests[0][WEST]);
  MPI_Recv_init(future_array + GET_OFFSET(local_size_y+1,1), 1, horizontal_vector,
                neighbors[SOUTH], SOUTH, MPI_CART_COMM, &receiving_requests[1][SOUTH]);
  MPI_Recv_init(future_array + GET_OFFSET(0,1), 1, horizontal_vector,
                neighbors[NORTH], NORTH, MPI_CART_COMM, &receiving_requests[1][NORTH]);
  MPI_Recv_init(future_array + GET_OFFSET(1,local_size_x+1), 1, vertical_vector,
                neighbors[EAST], EAST, MPI_CART_COMM, &receiving_requests[1][EAST]);
  MPI_Recv_init(future_array + GET_OFFSET(1,0), 1, vertical_vector,
                neighbors[WEST], WEST, MPI_CART_COMM, &receiving_requests[1][WEST]);

  /* Persistent sends of the outermost owned rows/columns into the
     neighbours' halos.  BUG FIX: the originals were MPI_Rsend_init;
     ready mode is erroneous unless the matching remote receive is
     guaranteed to be posted first, which nothing here enforces, so
     standard-mode persistent sends are used instead. */
  MPI_Send_init(current_array + GET_OFFSET(1,1), 1, horizontal_vector,
                neighbors[NORTH], SOUTH, MPI_CART_COMM, &sending_requests[0][NORTH]);
  MPI_Send_init(current_array + GET_OFFSET(local_size_y,1), 1, horizontal_vector,
                neighbors[SOUTH], NORTH, MPI_CART_COMM, &sending_requests[0][SOUTH]);
  MPI_Send_init(current_array + GET_OFFSET(1,1), 1, vertical_vector,
                neighbors[WEST], EAST, MPI_CART_COMM, &sending_requests[0][WEST]);
  MPI_Send_init(current_array + GET_OFFSET(1,local_size_x), 1, vertical_vector,
                neighbors[EAST], WEST, MPI_CART_COMM, &sending_requests[0][EAST]);
  MPI_Send_init(future_array + GET_OFFSET(1,1), 1, horizontal_vector,
                neighbors[NORTH], SOUTH, MPI_CART_COMM, &sending_requests[1][NORTH]);
  MPI_Send_init(future_array + GET_OFFSET(local_size_y,1), 1, horizontal_vector,
                neighbors[SOUTH], NORTH, MPI_CART_COMM, &sending_requests[1][SOUTH]);
  MPI_Send_init(future_array + GET_OFFSET(1,1), 1, vertical_vector,
                neighbors[WEST], EAST, MPI_CART_COMM, &sending_requests[1][WEST]);
  MPI_Send_init(future_array + GET_OFFSET(1,local_size_x), 1, vertical_vector,
                neighbors[EAST], WEST, MPI_CART_COMM, &sending_requests[1][EAST]);

  iz = 0;
#pragma omp parallel default(none) shared(array_size,local_size_x,local_size_y,sub_array,MPI_CART_COMM,receiving_requests,sending_requests,my_rank) private(current_array,future_array, time_step) firstprivate(iz,neighbors,horizontal_vector,vertical_vector)
  {
    /* Main loop: updates the sub-array values TIMESTEPS times. */
    for(time_step = 1; time_step <= TIMESTEPS; time_step++){
      current_array = sub_array + iz * array_size;
      future_array  = sub_array + (1-iz) * array_size;

      /* Exactly one thread (re)starts the persistent halo exchange;
         MPI_THREAD_MULTIPLE was requested so this is safe. */
#pragma omp single
      {
        MPI_Startall(4, receiving_requests[iz]);
        MPI_Startall(4, sending_requests[iz]);
      }

      /* update only the independent "white" slots of the sub_array */
      update( 2,                 /* Left x axis bound for update  */
              local_size_x - 1,  /* Right x axis bound for update */
              2,                 /* Upper y axis bound for update */
              local_size_y - 1,  /* Lower y axis bound for update */
              local_size_x + 2, current_array, future_array);

#pragma omp single
      MPI_Waitall(4, receiving_requests[iz], MPI_STATUSES_IGNORE);

      /* Halos have arrived: update the dependent border ("green") slots. */
      update_canvas( 1, local_size_x, 1, local_size_y,
                     local_size_x + 2, neighbors, current_array, future_array);

#pragma omp single
      MPI_Waitall(4, sending_requests[iz], MPI_STATUSES_IGNORE);

#ifdef REDUCE_PROGRAM
      /* BUG FIX: originally (a) the relative-change test lacked
         parentheses, computing f - c/c == f - 1 instead of (f-c)/c,
         (b) ix/iy/convergence_condition had no data-sharing clause,
         breaking compilation under default(none), and (c) every thread
         called MPI_Allreduce concurrently on the same communicator,
         which MPI forbids.  One thread now evaluates convergence and
         performs the reduction; copyprivate broadcasts the verdict so
         all threads take the same branch below.
         NOTE(review): cells holding 0 make the denominator 0 — presumably
         only interior (non-zero) cells matter; confirm with the author. */
      int keep_going = 1;
#pragma omp single copyprivate(keep_going)
      {
        int convergence_condition[2]; /* [0]=local verdict, [1]=global verdict */
        convergence_condition[0] = 1;
        for(int cy = 1; cy <= local_size_y && convergence_condition[0]; cy++){
          for(int cx = 1; cx < local_size_x+1; cx++){
            if( ((*(future_array + GET_OFFSET(cy,cx)) - *(current_array + GET_OFFSET(cy,cx)))
                 / *(current_array + GET_OFFSET(cy,cx))) > CONV_ERROR ){
              convergence_condition[0] = 0;
              break;
            }
          }
        }
        MPI_Allreduce(&convergence_condition[0], &convergence_condition[1],
                      1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
        keep_going = !convergence_condition[1];
      }
      if(!keep_going)
        break;
#endif

      iz = 1 - iz; /* change status from past to current array */
    }
  }

  /* free the custom defined data types (the uncommitted intermediates too) */
  MPI_Type_free(&vertical_vector);
  MPI_Type_free(&vertical_vector_temp);
  MPI_Type_free(&horizontal_vector);
  MPI_Type_free(&horizontal_vector_temp);

  /* release the persistent requests; after the final Waitall they are all
     inactive, so freeing here is legal */
  for(int j = 0; j < 2; j++){
    for(int i = 0; i < 4; i++){
      MPI_Request_free(&receiving_requests[j][i]);
      MPI_Request_free(&sending_requests[j][i]);
    }
  }

  MPI_Barrier(MPI_COMM_WORLD);
  if(my_rank == rank_size-1 ) {
    /* NOTE(review): the last rank reports the timing; after the barrier
       above any rank would report (roughly) the same wall time. */
    time_end = MPI_Wtime();
    printf("total time is: %lf\n", time_end - time_start );
  }

  free(sub_array); /* free the allocated memory of the matrix */

  MPI_Finalize(); /* End the mpi_enviroment and terminate the programm */
  return 0;
}

/**************************************************************************
 * subroutine prtdat
 * Writes the ny x nx grid u1 (indexed u1[ix*ny + iy]) to file fnam,
 * highest row first, space-separated, one grid row per output line.
 **************************************************************************/
void prtdat(int nx, int ny, float *u1, char *fnam) {
  int ix, iy;
  FILE *fp;

  fp = fopen(fnam, "w");
  if (fp == NULL)   /* BUG FIX: fopen result was used unchecked */
    return;
  for (iy = ny-1; iy >= 0; iy--) {
    for (ix = 0; ix <= nx-1; ix++) {
      fprintf(fp, "%6.1f", *(u1+ix*ny+iy));
      if (ix != nx-1)
        fprintf(fp, " ");
      else
        fprintf(fp, "\n");
    }
  }
  fclose(fp);
}

/*******************************************************************************
 * subroutine, that initializes the array with random nubers between the limits
 * given in the "defined" sector.
 * Allocates BOTH time-step buffers (current and future) back to back, each
 * padded with a one-cell halo; calloc zeroes everything, which also pins the
 * physical boundary rows/columns at 0.  Returns OK, or ERROR_CODE on
 * allocation failure (*array untouched in that case).
 ******************************************************************************/
int init_array(float** array, int y_size, int x_size, int neighbors[4]){
  int ix, iy;
  /* BUG FIX: calloc takes (count, element_size); the original swapped them
     (same byte total, but the corrected order matches the contract). */
  float *temp_array = calloc(2 * (y_size+2) * (x_size+2), sizeof(float));
  if (temp_array == NULL){
    printf("Error, cannot allocate memory\n");
    return ERROR_CODE;
  }
  /* Fill the interior, skipping rows/columns that sit on the physical
     boundary (no neighbour on that side) so they stay fixed at 0. */
  for(iy = 1; iy < y_size+1; iy++){
    if( !( neighbors[NORTH] == MPI_PROC_NULL && iy == 1 ) &&
        !( neighbors[SOUTH] == MPI_PROC_NULL && iy == y_size ) ){
      for(ix = 1; ix < x_size+1 ; ix++){
        if( !( neighbors[WEST] == MPI_PROC_NULL && ix == 1 ) &&
            !( neighbors[EAST] == MPI_PROC_NULL && ix == x_size ) ){
          *(temp_array + ix + iy*(x_size+2)) = (float)(my_rank); //MIN_TEMP + rand()%(MAX_TEMP - MIN_TEMP);
        }
      }
    }
  }
  *array = temp_array;
  return OK;
}

/******************************************************************************
 * subroutine for printing subarray (row-major, y_size rows of x_size values)
 ******************************************************************************/
void print_array(float* array, int y_size, int x_size){
  int ix, iy;
  for(iy = 0; iy < y_size; iy++){
    for(ix = 0; ix < x_size; ix++){
      printf("%6.1f ",*(array + ix + iy * x_size));
    }
    printf("\n");
  }
}

/* Five-point explicit heat-equation step evaluated at (ix, iy) of u1;
   x_size is the padded row stride.  Factored out of update/update_canvas,
   where the identical expression was repeated five times. */
static float stencil_point(const float *u1, int ix, int iy, int x_size){
  return *(u1+ix+iy*x_size)
    + parms.cx * (*(u1+(ix+1)+iy*x_size) + *(u1+(ix-1)+iy*x_size)
                  - 2.0 * *(u1+ix+iy*x_size))
    + parms.cy * (*(u1+ix+(iy+1)*x_size) + *(u1+ix+(iy-1)*x_size)
                  - 2.0 * *(u1+ix+iy*x_size));
}

/******************************************************************************
 * subroutine update
 * Applies the stencil to u1 over the inclusive rectangle
 * [start_x..end_x] x [start_y..end_y], writing into u2.  Called from inside
 * an active "omp parallel" region, so the row loop is workshared.
 ******************************************************************************/
void update(int start_x, int end_x, int start_y, int end_y, int x_size,
            float *u1, float *u2)
{
  int ix, iy;
#pragma omp for schedule(static)
  for(iy = start_y; iy <= end_y; iy++){
    for (ix = start_x; ix <= end_x; ix++){
      *(u2+ix+iy*x_size) = stencil_point(u1, ix, iy, x_size);
    }
  }
}

/******************************************************************************
 * subroutine update_canvas that computes the values of the dependent elements
 * of the sub_array ("green slots"): the outermost owned row/column on each
 * side that has a real neighbour (they need the just-received halo data).
 * Sides facing the physical boundary are left untouched; where two fixed
 * sides meet, the corner is excluded via the start/end adjustments.
 ******************************************************************************/
void update_canvas(int left_border_x, int right_border_x, int up_border_y,
                   int down_border_y, int x_size, int neighbors[4],
                   float *u1, float *u2){
  int ix, iy, end, start;

  if(neighbors[NORTH] != MPI_PROC_NULL){
    iy = up_border_y;
    start = left_border_x + ((neighbors[WEST] == MPI_PROC_NULL) ? 1 : 0);
    end = right_border_x - ((neighbors[EAST] == MPI_PROC_NULL) ? 1 : 0);
#pragma omp for schedule(static)
    for(ix = start; ix <= end; ix++){ /* updating northern border of green slots */
      *(u2+ix+iy*x_size) = stencil_point(u1, ix, iy, x_size);
    }
  }

  if(neighbors[SOUTH] != MPI_PROC_NULL){
    iy = down_border_y;
    start = left_border_x + ((neighbors[WEST] == MPI_PROC_NULL) ? 1 : 0);
    end = right_border_x - ((neighbors[EAST] == MPI_PROC_NULL) ? 1 : 0);
#pragma omp for schedule(static)
    for(ix = start; ix <= end; ix++){ /* updating southern border of green slots */
      *(u2+ix+iy*x_size) = stencil_point(u1, ix, iy, x_size);
    }
  }

  if(neighbors[WEST] != MPI_PROC_NULL){
    ix = left_border_x;
    start = up_border_y + ((neighbors[NORTH] == MPI_PROC_NULL) ? 1 : 0);
    end = down_border_y - ((neighbors[SOUTH] == MPI_PROC_NULL) ? 1 : 0);
    /* BUG FIX: the original looped to down_border_y, ignoring the computed
       "end" and overwriting the fixed physical boundary row whenever the
       SOUTH side has no neighbour. */
#pragma omp for schedule(static)
    for(iy = start; iy <= end; iy++){ /* updating western border of green slots */
      *(u2+ix+iy*x_size) = stencil_point(u1, ix, iy, x_size);
    }
  }

  if(neighbors[EAST] != MPI_PROC_NULL){
    ix = right_border_x;
    start = up_border_y + ((neighbors[NORTH] == MPI_PROC_NULL) ? 1 : 0);
    end = down_border_y - ((neighbors[SOUTH] == MPI_PROC_NULL) ? 1 : 0);
#pragma omp for schedule(static)
    for(iy = start; iy <= end; iy++){ /* updating eastern border of green slots */
      *(u2+ix+iy*x_size) = stencil_point(u1, ix, iy, x_size);
    }
  }
}
elemwise_binary_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! 
\brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } public: /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } private: template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const int size = static_cast<int>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, 
xpu>::Launch( s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
  // rhs gradient: same pattern as the lhs branch above, with the tuned backward
  // kernel instantiated for ROP and writing into outputs[1].
  MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
    const int size = static_cast<int>(
        (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
        / mxnet_op::DataType<DType>::kLanes);
    DType * rgrad_dptr = outputs[1].dptr<DType>();
    mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>,
                     xpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}

/*!
 * \brief Backward pass for binary ops whose inputs are row-sparse.
 *
 * Each requested input gradient is produced in two RspRspOp passes: first
 * grad = OP(inputs[1], inputs[2]) (LOP for the lhs grad, ROP for the rhs
 * grad, applied to the forward inputs), then an in-place elementwise multiply
 * by the incoming output gradient (inputs[0]).
 * NOTE(review): backup_compute is accepted but never invoked in this body —
 * confirm whether a dense fallback path was intended here.
 */
template< typename xpu, typename LOP, typename ROP,
          bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false,
          typename BackupCompute>
static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
                                    const OpContext &ctx,
                                    const std::vector<NDArray> &inputs,
                                    const std::vector<OpReqType> &req,
                                    const std::vector<NDArray> &outputs,
                                    BackupCompute backup_compute) {
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  // lhs grad
  if (req[0] != kNullOp) {
    // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
    RspRspOp<LOP>(
      s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false);
    // lhs in-place
    RspRspOp<op::mshadow_op::mul>(
      s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false);
  }
  // rhs grad
  if (req[1] != kNullOp) {
    RspRspOp<ROP>(
      s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], false, false, false, false);
    // rhs in-place
    RspRspOp<op::mshadow_op::mul>(
      s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false);
  }
}

/*!
 * \brief Backward when one input gradient is CSR and the other dense.
 * Only elementwise multiply is supported (checked below: LOP must be
 * mshadow_op::right and ROP mshadow_op::left, i.e. the mul backward pair).
 */
template<typename xpu, typename LOP, typename ROP>
static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
                                       const OpContext &ctx,
                                       const std::vector<NDArray> &inputs,
                                       const std::vector<OpReqType> &req,
                                       const std::vector<NDArray> &outputs) {
  const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
                             std::is_same<mshadow_op::left, ROP>::value;
  CHECK(supported_ops)
    << "Only backward for mul is supported (LOP should be right, ROP should be left)";
  const NDArray& out_grad = inputs[0];
  const NDArray& lhs_in = inputs[1];
  const NDArray& rhs_in = inputs[2];
  const NDArray& lhs_grad = outputs[0];
  const NDArray& rhs_grad = outputs[1];
  // reverse == true means the lhs gradient is the CSR-typed output.
  const bool reverse = (outputs[0].storage_type() == kCSRStorage);
  if (reverse) {
    // d(lhs) = out_grad * rhs (CSR path); d(rhs) = out_grad * lhs (dense path).
    DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
    Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()},
                                  {req[1]}, {rhs_grad.data()});
  } else {
    DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
    Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()},
                                  {req[0]}, {lhs_grad.data()});
  }
}

public:
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
                     const nnvm::NodeAttrs &attrs,
                     const OpContext &ctx,
                     const NDArray &lhs,
                     const NDArray &rhs,
                     OpReqType req,
                     const NDArray &output,
                     bool lhs_may_be_dense,
                     bool rhs_may_be_dense,
                     bool allow_inplace,
                     bool scatter);

/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<gpu> *s,
                     const nnvm::NodeAttrs &attrs,
                     const OpContext &ctx,
                     const NDArray &lhs,
                     const NDArray &rhs,
                     OpReqType req,
                     const NDArray &output,
                     bool lhs_may_be_dense,
                     bool rhs_may_be_dense,
                     bool allow_inplace,
                     bool scatter);

/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<cpu> *s,
                     const nnvm::NodeAttrs &attrs,
                     const OpContext &ctx,
                     const NDArray &lhs,
                     const NDArray &rhs,
                     OpReqType req,
                     const NDArray &output);

/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<gpu> *s,
                     const nnvm::NodeAttrs &attrs,
                     const OpContext &ctx,
                     const NDArray &lhs,
                     const NDArray &rhs,
                     OpReqType req,
                     const NDArray &output);

/*!
\brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
                        const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const NDArray &lhs,
                        const NDArray &rhs,
                        OpReqType req,
                        const NDArray &output,
                        const bool reverse);

/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
                        const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const NDArray &lhs,
                        const NDArray &rhs,
                        OpReqType req,
                        const NDArray &output,
                        const bool reverse);

/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const NDArray &lhs,
                        const NDArray &rhs,
                        OpReqType req,
                        const NDArray &output,
                        const bool reverse);

/*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
                        const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const NDArray &lhs,
                        const NDArray &rhs,
                        OpReqType req,
                        const NDArray &output,
                        const bool reverse);

public:
/*!
 * \brief Rsp-op-Rsp operation which produces a dense result
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if handled
 */
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
                                        int dev_mask,
                                        DispatchMode* dispatch_mode,
                                        std::vector<int> *in_attrs,
                                        std::vector<int> *out_attrs);

/*!
 * \brief Allow one of the binary inputs to be dense and still produce a sparse output.
 *        Typically used for sparse * dense = sparse.
 * Note: for csr, it dispatches to fallback other than csr, csr -> csr
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if handled
 */
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
                                    int dev_mask,
                                    DispatchMode* dispatch_mode,
                                    std::vector<int> *in_attrs,
                                    std::vector<int> *out_attrs) {
  using namespace common;
  CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
  CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
  const auto& lhs_stype = in_attrs->at(0);
  const auto& rhs_stype = in_attrs->at(1);
  auto& out_stype = out_attrs->at(0);
  bool dispatched = false;
  // FComputeEx only runs on CPU here; on other devices fall back to dense compute.
  const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
  const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                           DispatchMode::kFComputeEx;
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
    // dns, dns -> dns
    dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFCompute);
  }
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
    // rsp, rsp -> rsp
    dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                     dispatch_mode, dispatch_ex);
  }
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
    // csr, csr -> csr
    dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                     dispatch_mode, dispatch_ex);
  }
  if (!dispatched &&
      ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
       (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
    // rsp, dns -> rsp
    // dns, rsp -> rsp
    dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                     dispatch_mode, dispatch_ex);
  }
  if (!dispatched &&
      ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
       (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
    // csr, dns -> csr
    // dns, csr -> csr
    // NOTE(review): this branch uses kFComputeEx unconditionally instead of
    // dispatch_ex, unlike the branches above — verify the GPU fallback intent.
    dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched) {
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}

/*!
 * \brief Allow one of the inputs to be dense and produce a dense output,
 *        for rsp inputs only support when both inputs are rsp type.
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if handled
 */
template<bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                   const int dev_mask,
                                   DispatchMode* dispatch_mode,
                                   std::vector<int> *in_attrs,
                                   std::vector<int> *out_attrs) {
  using namespace common;
  CHECK_EQ(in_attrs->size(), 2);
  CHECK_EQ(out_attrs->size(), 1);
  const auto lhs_stype = (*in_attrs)[0];
  const auto rhs_stype = (*in_attrs)[1];
  bool dispatched = false;
  const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
  const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                           DispatchMode::kFComputeEx;
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
    // dns, dns ... -> dns
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFCompute);
  }
  if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
    // rsp, rsp, ... -> rsp
    dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
    // csr, csr, ... -> csr
    dispatched = storage_type_assign(out_attrs, kCSRStorage,
                                     dispatch_mode, dispatch_ex);
  }
  if (!dispatched &&
      ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
       (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
    // dense, csr -> dense / csr, dense -> dense
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched &&
      ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
       (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
    // dense, rsp -> dense / rsp, dense -> dense
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched) {
    // NOTE(review): the result of dispatch_fallback is discarded and true is
    // returned unconditionally below, unlike PreferSparseStorageType which
    // returns the dispatch result — confirm this asymmetry is intended.
    dispatch_fallback(out_attrs, dispatch_mode);
  }
  return true;
}

/*!
 * \brief Backward pass computing input gradient using forward inputs
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if handled
 */
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
                                     int dev_mask,
                                     DispatchMode* dispatch_mode,
                                     std::vector<int> *in_attrs,
                                     std::vector<int> *out_attrs);

// Dense elementwise forward: out = OP(lhs, rhs) on flat TBlobs, vectorized
// by DataType<DType>::kLanes.
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
                    const OpContext &ctx,
                    const std::vector<TBlob> &inputs,
                    const std::vector<OpReqType> &req,
                    const std::vector<TBlob> &outputs) {
  using namespace mxnet_op;
  if (req[0] != kNullOp) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }
}
// Dense elementwise comparison/logic forward: like Compute, but the output
// buffer is bool regardless of the (possibly bool) input DType.
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
  using namespace mxnet_op;
  if (req[0] != kNullOp) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<bool>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }
}

// Dense elementwise forward with half2 vectorization enabled in the type switch.
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mxnet_op;
  if (req[0] != kNullOp) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }
}

// Sparse forward dispatcher: routes each (lhs, rhs, out) storage-type
// combination to the matching kernel, or logs an unimplemented-op fallback.
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<NDArray> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<NDArray> &outputs) {
  using namespace common;
  CHECK_EQ(inputs.size(), 2);
  CHECK_EQ(outputs.size(), 1);
  if (req[0] == kNullOp) return;
  const auto lhs_stype = inputs[0].storage_type();
  const auto rhs_stype = inputs[1].storage_type();
  const auto out_stype = outputs[0].storage_type();
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
      (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
    // rsp, rsp -> rsp
    // rsp, rsp -> dns
    RspRspOp<OP>(
      s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
  } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
    // csr, csr -> csr
    CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
  } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
              (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
             out_stype == kDefaultStorage) {
    // one CSR + one dense -> dense; reverse records which side is the CSR one.
    const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
    const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
    const bool reverse = (lhs_stype == kCSRStorage);
    DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
  } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
              (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
             out_stype == kDefaultStorage) {
    // one RSP + one dense -> dense.
    const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
    const bool reverse = (lhs_stype == kRowSparseStorage);
    const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
    DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}

/*!
\brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
                                const OpContext &ctx,
                                const std::vector<NDArray> &inputs,
                                const std::vector<OpReqType> &req,
                                const std::vector<NDArray> &outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(inputs.size(), 2);
  CHECK_EQ(outputs.size(), 1);
  if (req[0] == kNullOp) return;
  const auto lhs_stype = inputs[0].storage_type();
  const auto rhs_stype = inputs[1].storage_type();
  const auto out_stype = outputs[0].storage_type();
  if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
      ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
       (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
       (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
      lhs_may_be_dense && rhs_may_be_dense) {
    // rsp, rsp -> rsp
    // rsp, rsp -> dns
    // rsp, dns -> rsp
    // dns, rsp -> rsp
    // More than once dense not allowed (this will be checked in RspRspOp):
    // rsp, dns -> dns <-- NOT ALLOWED
    // dns, rsp -> dns <-- NOT ALLOWED
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    RspRspOp<OP>(
      s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0],
      lhs_may_be_dense, rhs_may_be_dense, false, false);
  } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
    ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
  } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
              (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
             out_stype == kCSRStorage) {
    // one CSR + one dense -> csr output.
    const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
    const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
    const bool reverse = (lhs_stype == kCSRStorage);
    DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
  } else {
    LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
  }
}

// Dense backward when the op needs neither forward input (inputs[0] is the
// output gradient only); dispatches on the output DType.
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<TBlob> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<TBlob> &outputs) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
  });
}

// Same as BackwardUseNone but with half2 vectorization in the type switch.
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
                                            const OpContext &ctx,
                                            const std::vector<TBlob> &inputs,
                                            const std::vector<OpReqType> &req,
                                            const std::vector<TBlob> &outputs) {
  MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
    BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
  });
}

// Sparse backward (no forward inputs needed): applies LOP/ROP as unary maps
// to the output gradient when input and output storage types match.
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
  CHECK_EQ(inputs.size(), 1U);   // output grad
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto in_stype = inputs[0].storage_type();
  const auto lhs_stype = outputs[0].storage_type();
  const auto rhs_stype = outputs[1].storage_type();
  // lhs grad
  if (req[0] != kNullOp) {
    if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      CHECK_EQ(outputs[0].storage_type(), in_stype);
      // rsp -> rsp, _. op requires 0-input returns 0-output
      DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  // rhs grad
  if (req[1] != kNullOp) {
    if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      // NOTE(review): this checks outputs[0] in the rhs-grad branch although
      // the guard above compares in_stype with outputs[1]'s storage type —
      // outputs[1] looks intended here; verify against upstream.
      CHECK_EQ(outputs[0].storage_type(), in_stype);
      // rsp -> _, rsp. op requires 0-input returns 0-output
      DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
}

// Dense backward using forward inputs; dispatches on the output DType.
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
                                 const OpContext &ctx,
                                 const std::vector<TBlob> &inputs,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &outputs) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
  });
}

// Same as BackwardUseIn but with half2 vectorization in the type switch.
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
                                          const OpContext &ctx,
                                          const std::vector<TBlob> &inputs,
                                          const std::vector<OpReqType> &req,
                                          const std::vector<TBlob> &outputs) {
  MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
    BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
  });
}

// Sparse backward using forward inputs: inputs = {out_grad, lhs, rhs},
// outputs = {lhs_grad, rhs_grad}. The two storage-combination branches are
// mutually exclusive (the first requires all-rsp inputs).
template<
  typename xpu, typename LOP, typename ROP,
  bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<NDArray> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<NDArray> &outputs) {
  using namespace common;
  CHECK_EQ(inputs.size(), 3U);
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto out_grad_stype = inputs[0].storage_type();
  const auto lhs_grad_stype = outputs[0].storage_type();
  const auto rhs_grad_stype = outputs[1].storage_type();
  if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
      (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
      (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
    // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
    RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
      attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
  }
  if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
       (lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
      out_grad_stype == kDefaultStorage) {
    // dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
    DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
  }
}
};  // class ElemwiseBinaryOp

/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name)                          \
  NNVM_REGISTER_OP(name)                                              \
  .set_num_inputs(2)                                                  \
  .set_num_outputs(1)                                                 \
  .set_attr<nnvm::FListInputNames>("FListInputNames",                 \
    [](const NodeAttrs& attrs) {                                      \
      return std::vector<std::string>{"lhs", "rhs"};                  \
    })                                                                \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>)   \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>)       \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                   \
    [](const NodeAttrs& attrs){                                       \
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};       \
    })                                                                \
  .add_argument("lhs", "NDArray-or-Symbol", "first input")            \
  .add_argument("rhs", "NDArray-or-Symbol", "second input")

/*!
\brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$)               \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                                \
  .set_attr<FInferStorageType>("FInferStorageType",                                      \
                               ElemwiseStorageType<2, 1, true, true, true>)              \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)        \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)  \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                  \
    [](const NodeAttrs& attrs) {                                                         \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, with FComputeEx for csr and rsp available.
           when inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$)            \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                                \
  .set_attr<FInferStorageType>("FInferStorageType",                                      \
                               ElemwiseBinaryOp::PreferSparseStorageType)                \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)        \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)  \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                  \
    [](const NodeAttrs& attrs) {                                                         \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, dense result
 * NOTE(review): despite the original comment claiming FInferStorageType is not
 * set by this macro, it IS set below (to SparseSparseWithDenseResult) — the
 * comment looked stale; verify against the operator registrations using it.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$)            \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                                \
  .set_attr<FInferStorageType>("FInferStorageType",                                      \
                               ElemwiseBinaryOp::SparseSparseWithDenseResult)            \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)        \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)

/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$)            \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                                \
  .set_attr<FInferStorageType>("FInferStorageType",                                      \
                               ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)        \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)  \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                  \
    [](const NodeAttrs& attrs) {                                                         \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
task2.h
#include <iostream> #include <omp.h> #include <chrono> #include "util.h" inline int multiplyP(int* A, int* B, int* C, const size_t& ah, const size_t& aw, const size_t& bh, const size_t& bw) { const int cw = bw; const int ch = ah; const int size = ch * cw; if (aw != bh) { throw "Matrix dimensions must agree"; } #pragma omp parallel for for (auto idx = 0; idx < size; ++idx) { const int y = idx / cw; const int x = idx % cw; int sum = 0; for (auto i = 0; i < bh; ++i) { sum += A[aw * y + i] * B[bw * i + x]; } C[idx] = sum; } } inline int multiply(int* A, int* B, int* C, const size_t& ah, const size_t& aw, const size_t& bh, const size_t& bw) { const int cw = bw; const int ch = ah; const int size = ch * cw; if (aw != bh) { throw "Matrix dimensions must agree"; } for (auto idx = 0; idx < size; ++idx) { const int y = idx / cw; const int x = idx % cw; int sum = 0; for (auto i = 0; i < bh; ++i) { sum += A[aw * y + i] * B[bw * i + x]; } C[idx] = sum; } } inline bool equals(const int* A, const int* B, const size_t& size) { for (auto i = 0; i < size; ++i) { if (A[i] != B[i]) { return false; } } return true; } int task2() { omp_set_num_threads(6); const int h = 20000; const int m = 100; const int w = 6000; srand(999); const auto seed = rand(); auto begin = std::chrono::system_clock::now(), end = std::chrono::system_clock::now(); double avgSeq = 0; double avgPar = 0; int res = 0; for (auto i = 0; i < 20; ++i) { std::cout << "\r" << i + 1 << std::flush; const size_t h = rand() % 300 + 300; const size_t m = rand() % 300 + 300; const size_t w = rand() % 300 + 300; const size_t ah = h; const size_t aw = m; const size_t bh = m; const size_t bw = w; auto A = new int[ah * aw]; auto B = new int[bh * bw]; auto C = new int[h * w]; fill(A, ah * aw, rand()); fill(B, bh * bw, rand()); begin = std::chrono::system_clock::now(); res = multiplyP(A, B, C, ah, aw, bh, bw); end = std::chrono::system_clock::now(); avgPar += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count(); 
begin = std::chrono::system_clock::now(); res = multiply(A, B, C, ah, aw, bh, bw); end = std::chrono::system_clock::now(); avgSeq += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count(); } std::cout << "\r" << "S:" << avgSeq / 100 << "\n" << "P:" << avgPar / 100 << std::endl; }
3.race1.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N][N]; for (int i = 1; i < N; i++) #pragma omp parallel for for (int j = 1; j < N; j++) for (int k = 1; k < N; k++) A[i][j][k] = A[i][j - 1][k]; } // CHECK: Data Race detected // END
dormqr.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zunmqr.c, normal z -> d, Fri Sep 28 17:38:04 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_unmqr
 *
 *  Overwrites the general complex m-by-n matrix C with
 *
 *                              side = PlasmaLeft      side = PlasmaRight
 *      trans = PlasmaNoTrans        Q * C                  C * Q
 *      trans = PlasmaTrans          Q^T * C                C * Q^T
 *
 *  where Q is an orthogonal matrix defined as the product of k
 *  elementary reflectors
 *
 *      Q = H(1) H(2) . . . H(k)
 *
 *  as returned by plasma_dgeqrf. Q is of order m if side = PlasmaLeft
 *  and of order n if side = PlasmaRight.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Intended usage:
 *          - PlasmaLeft:  apply Q or Q^T from the left;
 *          - PlasmaRight: apply Q or Q^T from the right.
 *
 * @param[in] trans
 *          Intended usage:
 *          - PlasmaNoTrans: No transpose, apply Q;
 *          - PlasmaTrans:   Transpose, apply Q^T.
 *
 * @param[in] m
 *          The number of rows of the matrix C. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix C. n >= 0.
 *
 * @param[in] k
 *          The number of elementary reflectors whose product defines
 *          the matrix Q.
 *          If side == PlasmaLeft,  m >= k >= 0.
 *          If side == PlasmaRight, n >= k >= 0.
 *
 * @param[in] pA
 *          Details of the QR factorization of the original matrix A as returned
 *          by plasma_dgeqrf.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          If side == PlasmaLeft,  lda >= max(1,m).
 *          If side == PlasmaRight, lda >= max(1,n).
 *
 * @param[in] T
 *          Auxiliary factorization data, computed by plasma_dgeqrf.
 *
 * @param[in,out] pC
 *          On entry, pointer to the m-by-n matrix C.
 *          On exit, C is overwritten by Q*C, Q^T*C, C*Q, or C*Q^T.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_dormqr
 * @sa plasma_cunmqr
 * @sa plasma_dormqr
 * @sa plasma_sormqr
 * @sa plasma_dgeqrf
 *
 ******************************************************************************/
int plasma_dormqr(plasma_enum_t side, plasma_enum_t trans,
                  int m, int n, int k,
                  double *pA, int lda,
                  plasma_desc_t T,
                  double *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((trans != PlasmaTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -4;
    }
    // am is the order of Q: m when applied from the left, n from the right.
    int am;
    if (side == PlasmaLeft) {
        am = m;
    }
    else {
        am = n;
    }
    if ((k < 0) || (k > am)) {
        plasma_error("illegal value of k");
        return -5;
    }
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return -10;
    }

    // quick return
    if (m == 0 || n == 0 || k == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geqrf(plasma, PlasmaRealDouble, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        am, k, 0, 0, am, k, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmqr: work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // BUG FIX: the tile descriptors were leaked on this error path.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // BUG FIX: the return value was previously assigned and ignored.
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // BUG FIX: the return value was previously assigned and ignored.
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&C);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pC, ldc, C, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dormqr(side, trans,
                          A, T, C,
                          work,
                          &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_unmqr
 *
 *  Non-blocking tile version of plasma_dormqr().
 *  May return before the computation is finished.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 * @param[in] side
 *          Intended usage:
 *          - PlasmaLeft:  apply Q or Q^T from the left;
 *          - PlasmaRight: apply Q or Q^T from the right.
 *
 * @param[in] trans
 *          Intended usage:
 *          - PlasmaNoTrans: apply Q;
 *          - PlasmaTrans:   apply Q^T.
 *
 * @param[in] A
 *          Descriptor of matrix A stored in the tile layout.
 *          Details of the QR factorization of the original matrix A as returned
 *          by plasma_dgeqrf.
 *
 * @param[in] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by plasma_dgeqrf.
 *
 * @param[in,out] C
 *          Descriptor of matrix C.
 *          On entry, the m-by-n matrix C.
 *          On exit, C is overwritten by Q*C, Q^T*C, C*Q, or C*Q^T.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas kernels.
 *          For multiplication by Q contains preallocated space for work
 *          arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_dormqr
 * @sa plasma_omp_cunmqr
 * @sa plasma_omp_dormqr
 * @sa plasma_omp_sormqr
 * @sa plasma_omp_dgeqrf
 *
 ******************************************************************************/
void plasma_omp_dormqr(plasma_enum_t side, plasma_enum_t trans,
                       plasma_desc_t A, plasma_desc_t T, plasma_desc_t C,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // BUG FIX: sequence and request must be validated before any call to
    // plasma_request_fail(sequence, request, ...) dereferences them; these
    // checks previously came last AND themselves called plasma_request_fail,
    // dereferencing the very pointer that was NULL.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_error("invalid value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaTrans) && (trans != PlasmaNoTrans)) {
        plasma_error("invalid value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (C.m == 0 || C.n == 0 || A.m == 0 || A.n == 0)
        return;

    // Call the parallel function.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pdormqr_tree(side, trans,
                            A, T, C,
                            work,
                            sequence, request);
    }
    else {
        plasma_pdormqr(side, trans,
                       A, T, C,
                       work,
                       sequence, request);
    }
}
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"

/*
  Typedef declarations.
*/
/*
  One entry of the per-channel colorspace transform lookup tables
  (x_map/y_map/z_map in sRGBTransformImage): the three contributions of a
  single input channel level to the three output channels.
*/
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
  Forward declarations.
*/
static MagickBooleanType
  TransformsRGBImage(Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C o l o r s p a c e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageColorspaceType() returns the potential type of image:
%  sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
%  To ensure the image type matches its potential, use SetImageColorspaceType():
%
%      (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
%        exception);
%
%  The format of the GetImageColorspaceType method is:
%
%      ColorspaceType GetImageColorspaceType(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colorspace=image->colorspace;
  /* Any purely gray image (with or without alpha) reports GRAYColorspace. */
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    colorspace=GRAYColorspace;
  return(colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   s R G B T r a n s f o r m I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  sRGB <-> colorspace conversion helpers: each wrapper pivots through CIE
  XYZ via the shared ConvertRGBToXYZ()/ConvertXYZToRGB() primitives, always
  passing channels in (red,green,blue) order.
*/
static inline void ConvertAdobe98ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline void ConvertDisplayP3ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline void ConvertProPhotoToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  /* CMY is simply the complement of RGB, scaled to [0,1]. */
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

static void ConvertRGBToAdobe98(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToAdobe98(X,Y,Z,r,g,b);
}

static void ConvertRGBToDisplayP3(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToDisplayP3(X,Y,Z,r,g,b);
}

static void ConvertRGBToProPhoto(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToProPhoto(X,Y,Z,r,g,b);
}

static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

static void ConvertRGBToLuv(const double red,const double green,
  const double blue,const IlluminantType illuminant,double *L,double *u,
  double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}

static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  /* PerceptibleReciprocal() guards the X+Y+Z == 0 (black) case. */
  gamma=PerceptibleReciprocal(X+Y+Z);
  *low_x=gamma*X;
  *low_y=gamma*Y;
  *cap_Y=Y;
}

static void inline ConvertXYZToJzazbz(const double X,const double Y,
  const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b  1.15  /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g  0.66
#define Jzazbz_c1  (3424.0/4096.0)
#define Jzazbz_c2  (2413.0/128.0)
#define Jzazbz_c3  (2392.0/128.0)
#define Jzazbz_n  (2610.0/16384.0)
#define Jzazbz_p  (1.7*2523.0/32.0)
#define Jzazbz_d  (-0.56)
#define Jzazbz_d0  (1.6295499532821566e-11)

  double
    gamma,
    Iz,
    L,
    Lp,
    M,
    Mp,
    S,
    Sp,
    Xp,
    Yp,
    Zp;

  Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
  Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
  Zp=Z;
  L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
  M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
  S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
  /* Perceptual quantizer (PQ) non-linearity applied per LMS channel. */
  gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  Iz=0.5*Lp+0.5*Mp;
  *az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
  *bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
  *Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}

static void inline ConvertJzazbzToXYZ(const double Jz,const double az,
  const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
  double
    azz,
    bzz,
gamma, Iz, L, Lp, M, Mp, S, Sp, Xp, Yp, Zp; gamma=Jz+Jzazbz_d0; Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0); azz=az-0.5; bzz=bz-0.5; Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz; Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz; Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz; gamma=pow(Lp,1.0/Jzazbz_p); L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); gamma=pow(Mp,1.0/Jzazbz_p); M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); gamma=pow(Sp,1.0/Jzazbz_p); S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S; Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S; Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S; *X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b; *Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g; *Z=Zp; } static void ConvertRGBToJzazbz(const double red,const double green, const double blue,const double white_luminance,double *Jz,double *az, double *bz) { double X, Y, Z; ConvertRGBToXYZ(red,blue,green,&X,&Y,&Z); ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz); } static void ConvertJzazbzToRGB(const double Jz,const double az, const double bz,const double white_luminance,double *red,double *green, double *blue) { double X, Y, Z; ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,blue,green); } static void ConvertRGBToYDbDr(const double red,const double green, const double blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const double red,const double green, const double blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; 
*Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

static void ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  /* ITU-R BT.601 luma/chroma; Pb/Pr are offset by 0.5 to map into [0,1]. */
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}

static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  /* YCbCr and YPbPr share the same normalized matrix here. */
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}

static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}

static MagickBooleanType sRGBTransformImage(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag  "RGBTransform/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  IlluminantType
    illuminant = D65Illuminant;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    primary_info;

  ssize_t
    i;

  ssize_t
    y;

  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  /* The "color:illuminant" artifact overrides the default D65 white point. */
  artifact=GetImageArtifact(image,"color:illuminant");
  if (artifact != (const char *) NULL)
    {
      illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
        MagickFalse,artifact);
      if ((ssize_t) illuminant < 0)
        illuminant=UndefinedIlluminant;
    }
  status=MagickTrue;
  progress=0;
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      PixelInfo
        zero;

      /*
        Convert RGB to CMYK colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      GetPixelInfo(image,&zero);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=zero;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          GetPixelInfoPixel(image,q,&pixel);
          ConvertRGBToCMYK(&pixel);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->type=image->alpha_trait == UndefinedPixelTrait ?
        ColorSeparationType : ColorSeparationAlphaType;
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LinearGRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            gray;

          /*
            Rec. 709 luma weights on gamma-decoded (linear) channel values.
          */
          gray=0.212656*DecodePixelGamma(GetPixelRed(image,q))+0.715158*
            DecodePixelGamma(GetPixelGreen(image,q))+0.072186*
            DecodePixelGamma(GetPixelBlue(image,q));
          SetPixelGray(image,ClampToQuantum(gray),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case GRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            gray;

          /* Same Rec. 709 weights, but on the encoded values (no decode). */
          gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
            0.072186*GetPixelBlue(image,q);
          SetPixelGray(image,ClampToQuantum(gray),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case CMYColorspace:
    case Adobe98Colorspace:
    case DisplayP3Colorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case JzazbzColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case ProPhotoColorspace:
    case xyYColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      const char
        *value;

      double
        white_luminance;

      /*
        Transform image from sRGB to target colorspace.
      */
      /* Jzazbz absolute luminance; override via "white-luminance" property. */
      white_luminance=10000.0;
      value=GetImageProperty(image,"white-luminance",exception);
      if (value != (const char *) NULL)
        white_luminance=StringToDouble(value,(char **) NULL);
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red,
            X,
            Y,
            Z;

          red=(double) GetPixelRed(image,q);
          green=(double) GetPixelGreen(image,q);
          blue=(double) GetPixelBlue(image,q);
          /* X/Y/Z carry the three target-space channels, scaled to [0,1]. */
          switch (colorspace)
          {
            case Adobe98Colorspace:
            {
              ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z);
              break;
            }
            case CMYColorspace:
            {
              ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case DisplayP3Colorspace:
            {
              ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLColorspace:
            {
              ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLpColorspace:
            {
              ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSBColorspace:
            {
              ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSIColorspace:
            {
              ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSLColorspace:
            {
              ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSVColorspace:
            {
              ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HWBColorspace:
            {
              ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case JzazbzColorspace:
            {
              ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z);
              break;
            }
            case LabColorspace:
            {
              ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z);
              break;
            }
            case LCHColorspace:
            case LCHabColorspace:
            {
              /* LCH defaults to the CIELab-based variant. */
              ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z);
              break;
            }
            case LCHuvColorspace:
            {
              ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z);
              break;
            }
            case LMSColorspace:
            {
              ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LuvColorspace:
            {
              ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z);
              break;
            }
            case ProPhotoColorspace:
            {
              ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z);
              break;
            }
            case xyYColorspace:
            {
              ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case XYZColorspace:
            {
              ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YCbCrColorspace:
            {
              ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YDbDrColorspace:
            {
              ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YIQColorspace:
            {
              ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YPbPrColorspace:
            {
              ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YUVColorspace:
            {
              ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
              break;
            }
            default:
            {
              X=QuantumScale*red;
              Y=QuantumScale*green;
              Z=QuantumScale*blue;
              break;
            }
          }
          SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
          SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
          SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LogColorspace:
    {
#define DisplayGamma  (1.0/1.7)
#define FilmGamma  0.6
#define ReferenceBlack  95.0
#define ReferenceWhite  685.0

      const char
        *value;

      double
        black,
        density,
        film_gamma,
        gamma,
        reference_black,
        reference_white;

      Quantum
        *logmap;

      /*
        Transform RGB to Log colorspace.
      */
      /* Cineon-style log encoding; image properties override the defaults. */
      density=DisplayGamma;
      gamma=DisplayGamma;
      value=GetImageProperty(image,"gamma",exception);
      if (value != (const char *) NULL)
        gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
      film_gamma=FilmGamma;
      value=GetImageProperty(image,"film-gamma",exception);
      if (value != (const char *) NULL)
        film_gamma=StringToDouble(value,(char **) NULL);
      reference_black=ReferenceBlack;
      value=GetImageProperty(image,"reference-black",exception);
      if (value != (const char *) NULL)
        reference_black=StringToDouble(value,(char **) NULL);
      reference_white=ReferenceWhite;
      value=GetImageProperty(image,"reference-white",exception);
      if (value != (const char *) NULL)
        reference_white=StringToDouble(value,(char **) NULL);
      logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
        sizeof(*logmap));
      if (logmap == (Quantum *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
        PerceptibleReciprocal(film_gamma));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
        logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
          log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002*
          PerceptibleReciprocal(film_gamma)))/1024.0));
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          double
            blue,
            green,
            red;

          red=(double) DecodePixelGamma((MagickRealType)
            GetPixelRed(image,q));
          green=(double) DecodePixelGamma((MagickRealType)
            GetPixelGreen(image,q));
          blue=(double) DecodePixelGamma((MagickRealType)
            GetPixelBlue(image,q));
          SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
          SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
            q);
          SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      logmap=(Quantum *) RelinquishMagickMemory(logmap);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case RGBColorspace:
    case scRGBColorspace:
    {
      /*
        Transform image from sRGB to linear RGB.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red;

          red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
          green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
          blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
          SetPixelRed(image,ClampToQuantum(red),q);
          SetPixelGreen(image,ClampToQuantum(green),q);
          SetPixelBlue(image,ClampToQuantum(blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    default:
      break;
  }
  /*
    Allocate the tables.
  */
  x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*x_map));
  y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*y_map));
  z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*z_map));
  if ((x_map == (TransformPacket *) NULL) ||
      (y_map == (TransformPacket *) NULL) ||
      (z_map == (TransformPacket *) NULL))
    {
      if (x_map != (TransformPacket *) NULL)
        x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
      if (y_map != (TransformPacket *) NULL)
        y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
      if (z_map != (TransformPacket *) NULL)
        z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(&primary_info,0,sizeof(primary_info));
  switch (colorspace)
  {
    case OHTAColorspace:
    {
      /*
        Initialize OHTA tables:

          I1 = 0.33333*R+0.33334*G+0.33333*B
          I2 = 0.50000*R+0.00000*G-0.50000*B
          I3 =-0.25000*R+0.50000*G-0.25000*B

        I and Q, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      /* The +0.5 channel offsets are folded into primary_info.y/.z. */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.33333*(double) i);
        x_map[i].y=(MagickRealType) (0.50000*(double) i);
        x_map[i].z=(MagickRealType) (-0.25000*(double) i);
        y_map[i].x=(MagickRealType) (0.33334*(double) i);
        y_map[i].y=(MagickRealType) (0.00000*(double) i);
        y_map[i].z=(MagickRealType) (0.50000*(double) i);
        z_map[i].x=(MagickRealType) (0.33333*(double) i);
        z_map[i].y=(MagickRealType) (-0.50000*(double) i);
        z_map[i].z=(MagickRealType) (-0.25000*(double) i);
      }
      break;
    }
    case Rec601YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.601):

          Y =  0.2988390*R+0.5868110*G+0.1143500*B
          Cb= -0.1687367*R-0.3312640*G+0.5000000*B
          Cr=  0.5000000*R-0.4186880*G-0.0813120*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        y_map[i].y=(MagickRealType) (-0.331264*(double) i);
        y_map[i].z=(MagickRealType) (-0.418688*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        z_map[i].z=(MagickRealType) (-0.081312*(double) i);
      }
      break;
    }
    case Rec709YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.709):

          Y = 0.212656*R+0.715158*G+0.072186*B
          Cb= -0.114572*R-0.385428*G+0.500000*B
          Cr= 0.500000*R-0.454153*G-0.045847*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212656*(double) i);
        x_map[i].y=(MagickRealType) (-0.114572*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].x=(MagickRealType) (0.715158*(double) i);
        y_map[i].y=(MagickRealType) (-0.385428*(double) i);
        y_map[i].z=(MagickRealType) (-0.454153*(double) i);
        z_map[i].x=(MagickRealType) (0.072186*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        z_map[i].z=(MagickRealType) (-0.045847*(double) i);
      }
      break;
    }
    case YCCColorspace:
    {
      /*
        Initialize YCC tables:

          Y =  0.298839*R+0.586811*G+0.114350*B
          C1= -0.298839*R-0.586811*G+0.88600*B
          C2=  0.70100*R-0.586811*G-0.114350*B

        YCC is scaled by 1.3584.  C1 zero is 156 and C2 is at 137.
      */
      primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
      primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
      /* Linear segment below the 1.8% breakpoint, power law above. */
      for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
      {
        x_map[i].x=0.005382*i;
        x_map[i].y=(-0.003296)*i;
        x_map[i].z=0.009410*i;
        y_map[i].x=0.010566*i;
        y_map[i].y=(-0.006471)*i;
        y_map[i].z=(-0.007880)*i;
        z_map[i].x=0.002052*i;
        z_map[i].y=0.009768*i;
        z_map[i].z=(-0.001530)*i;
      }
      for ( ; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=0.298839*(1.099*i-0.099);
        x_map[i].y=(-0.298839)*(1.099*i-0.099);
        x_map[i].z=0.70100*(1.099*i-0.099);
        y_map[i].x=0.586811*(1.099*i-0.099);
        y_map[i].y=(-0.586811)*(1.099*i-0.099);
        y_map[i].z=(-0.586811)*(1.099*i-0.099);
        z_map[i].x=0.114350*(1.099*i-0.099);
        z_map[i].y=0.88600*(1.099*i-0.099);
        z_map[i].z=(-0.114350)*(1.099*i-0.099);
      }
      break;
    }
    default:
    {
      /*
        Linear conversion tables.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (1.0*(double) i);
        x_map[i].y=(MagickRealType) 0.0;
        x_map[i].z=(MagickRealType) 0.0;
        y_map[i].x=(MagickRealType) 0.0;
        y_map[i].y=(MagickRealType) (1.0*(double) i);
        y_map[i].z=(MagickRealType) 0.0;
        z_map[i].x=(MagickRealType) 0.0;
        z_map[i].y=(MagickRealType) 0.0;
        z_map[i].z=(MagickRealType) (1.0*(double) i);
      }
      break;
    }
  }
  /*
    Convert from sRGB.
  */
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Convert DirectClass image.
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        unsigned int
          blue,
          green,
          red;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Table lookup: out = x_map[R] + y_map[G] + z_map[B] + offset. */
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(image,q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(image,q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(image,q)));
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            primary_info.z;
          SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
          SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
          SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      unsigned int
        blue,
        green,
        red;

      /*
        Convert PseudoClass image: transform the colormap only.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        PixelInfo
          pixel;

        red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image,exception);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r s p a c e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorspace() sets the colorspace member of the Image structure.
%
%  The format of the SetImageColorspace method is:
%
%      MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    pending_type;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already in the requested colorspace */
  /*
    Record the new colorspace and reset the colorimetric metadata (gamma,
    rendering intent, chromaticity) to defaults appropriate for it.
  */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  pending_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Grayscale target: linear gray carries no encoding gamma.
      */
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;
      pending_type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;  /* linear colorspaces: unit gamma */
    else
      {
        /*
          Other colorspaces: assume sRGB-style primaries and a perceptual
          rendering intent.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  /*
    Propagate the change to the pixel cache before updating the image type.
  */
  status=SyncImagePixelCache(image,exception);
  image->type=pending_type;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have the
%  same red, green, and
blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *auto_grayscale;

  ImageType
    gray_type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Fast paths: already gray, or a colorspace we do not classify.
  */
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Honor an explicit opt-out via the colorspace:auto-grayscale property.
  */
  auto_grayscale=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(auto_grayscale) != MagickFalse)
    return(MagickFalse);
  gray_type=IdentifyImageGray(image,exception);
  if (gray_type == UndefinedType)
    return(MagickFalse);
  /*
    Every pixel is gray: demote the image to the GRAY colorspace.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=gray_type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n o c h r o m e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Fast paths: already bi-level, or a colorspace we do not classify.
  */
  if (IsImageMonochrome(image) != MagickFalse)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Every pixel is pure black or white: demote to bi-level gray.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace, changing the
%  image data to reflect the new colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    A colorspace conversion invalidates any embedded color profiles.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    First leg: bring the pixels into sRGB (unless they already are).
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Second leg: convert from sRGB to the requested colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m s R G B I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformsRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformsRGBImage method is:
%
%      MagickBooleanType TransformsRGBImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Convert normalized CMY (each component in [0,1]) to RGB on the
  [0,QuantumRange] scale: each channel is the complement of its ink.
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  *red=QuantumRange*(1.0-cyan);
  *green=QuantumRange*(1.0-magenta);
  *blue=QuantumRange*(1.0-yellow);
}

/*
  Fixed 3x3 linear map from LMS to CIE XYZ.
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/*
  LMS -> RGB, via an intermediate XYZ triplet.
*/
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Luv -> RGB.  Inputs are normalized to [0,1]; they are rescaled here to the
  conventional L*u*v* ranges (L in [0,100], u in [-134,220], v in [-140,122])
  before the XYZ conversion.
*/
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,const IlluminantType illuminant,double *red,double *green,
  double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,illuminant,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Round to the nearest integer, clamped to [0,1388] — an index into the
  1389-entry YCC lookup table defined further below.
*/
static inline ssize_t RoundToYCC(const double value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/*
  Lab -> RGB.  Inputs are normalized to [0,1]; L is rescaled to [0,100] and
  a/b are re-centered to [-127.5,127.5] before the XYZ conversion.
*/
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,const IlluminantType illuminant,double *red,double *green,
  double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),illuminant,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  xyY (chromaticity + luminance) -> RGB.  PerceptibleReciprocal() guards
  against division by a zero y chromaticity.
*/
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    gamma,
    X,
    Y,
    Z;

  gamma=PerceptibleReciprocal(low_y);
  X=gamma*cap_Y*low_x;
  Y=cap_Y;
  Z=gamma*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  YPbPr -> RGB on [0,QuantumRange]; the chroma components arrive biased by
  0.5 and are re-centered here.
*/
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+
    1.4019995886561440468*(Pr-0.5));
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)-
    0.71413649331646789076*(Pr-0.5));
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+
    2.1453384174593273e-06*(Pr-0.5));
}

/*
  YCbCr -> RGB: delegates to the YPbPr inverse matrix above.
*/
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

/*
  YIQ -> RGB on [0,QuantumRange]; I and Q arrive biased by 0.5.
*/
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754*
    (Q-0.5));
  *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427*
    (Q-0.5));
  *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374*
    (Q-0.5));
}

/*
  YDbDr -> RGB on [0,QuantumRange]; Db and Dr arrive biased by 0.5.
*/
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5));
  *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5));
  *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5));
}

/*
  YUV -> RGB on [0,QuantumRange]; U and V arrive biased by 0.5.
*/
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825*
    (V-0.5));
  *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797*
    (V-0.5));
  *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04*
    (V-0.5));
}

/*
  Convert the image pixels from their current colorspace back to sRGB.
*/
static MagickBooleanType TransformsRGBImage(Image *image,
  ExceptionInfo *exception)
{
#define TransformsRGBImageTag  "Transform/Image"

  /*
    1389-entry lookup table used by the YCC decoding path — presumably the
    Kodak PhotoYCC transfer curve; TODO confirm against the encoder tables.
  */
  static const float
    YCCMap[1389] =
    {
      0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
      0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
      0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
      0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
      0.017291f, 0.018012f,
0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 
0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 
0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 
0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 
0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 
0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 
0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; const char *artifact; IlluminantType illuminant = D65Illuminant; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"color:illuminant"); if (artifact != (const char *) NULL) { illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions, MagickFalse,artifact); if ((ssize_t) illuminant < 0) illuminant=UndefinedIlluminant; } status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*EncodePixelGamma(GetPixelRed(image,q))+0.715158* EncodePixelGamma(GetPixelGreen(image,q))+0.072186* EncodePixelGamma(GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case Adobe98Colorspace: case CMYColorspace: case DisplayP3Colorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case JzazbzColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case ProPhotoColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { const char *value; double white_luminance; /* Transform image from source colorspace to sRGB. 
*/ white_luminance=10000.0; value=GetImageProperty(image,"white-luminance",exception); if (value != (const char *) NULL) white_luminance=StringToDouble(value,(char **) NULL); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case Adobe98Colorspace: { ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue); break; } case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case DisplayP3Colorspace: { ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case JzazbzColorspace: { ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue); break; } case LabColorspace: { 
ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case ProPhotoColorspace: { ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma)); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma))-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; 
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); 
SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransformsRGBImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. 
*/ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
/* pixel-view.c */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                      PPPP   IIIII  X   X  EEEEE  L                          %
%                      P   P    I     X X   E      L                          %
%                      PPPP     I      X    EEE    L                          %
%                      P        I     X X   E      L                          %
%                      P      IIIII  X   X  EEEEE  LLLLL                      %
%                                                                             %
%                        V   V  IIIII  EEEEE  W   W                           %
%                        V   V    I    E      W   W                           %
%                        V   V    I    EEE    W W W                           %
%                         V V     I    E      WW WW                           %
%                          V    IIIII  EEEEE  W   W                           %
%                                                                             %
%                                                                             %
%                     MagickWand Pixel View Methods                           %
%                                                                             %
%                              Software Design                                %
%                                John Cristy                                  %
%                                 March 2003                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.          %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/

/*
  Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"

/*
  Define declarations.
*/
#define PixelViewId  "PixelView"

/*
  Typedef declarations.

  A PixelView wraps a rectangular region of a wand's image together with a
  cache view and a per-thread table of pixel wands, so the region can be
  iterated scanline-by-scanline (possibly in parallel under OpenMP).
*/
struct _PixelView
{
  unsigned long
    id;                   /* unique wand id; released via RelinquishWandId() */

  char
    name[MaxTextExtent];  /* debug/log name, e.g. "PixelView-42" */

  ExceptionInfo
    *exception;           /* owned; collects errors raised through this view */

  MagickWand
    *wand;                /* borrowed; the wand whose image is being viewed */

  ViewInfo
    *view;                /* owned cache view over the wand's image pixels */

  RectangleInfo
    region;               /* viewed region (may extend beyond the canvas) */

  unsigned long
    number_threads;       /* number of rows in pixel_wands, one per thread */

  PixelWand
    ***pixel_wands;       /* [thread][column] scanline wand table */

  MagickBooleanType
    debug;                /* when MagickTrue, log wand events for this view */

  unsigned long
    signature;            /* WandSignature while the structure is valid */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e P i x e l V i e w                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelView() makes a copy of the specified pixel view.
%
%  The format of the ClonePixelView method is:
%
%      PixelView *ClonePixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register long
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatMagickString(clone_view->name,MaxTextExtent,"%s-%lu",PixelViewId,
    clone_view->id);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  /*
    The view borrows (does not own) the wand; share the pointer so the clone
    is usable with the iterator methods (which dereference view->wand).
  */
  clone_view->wand=pixel_view->wand;
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /*
    Bug fix: the per-thread wand table must be allocated before it is
    populated; the previous code wrote through a NULL pixel_wands pointer
    (zeroed by ResetMagickMemory above).
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory((size_t)
    pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  for (i=0; i < (long) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y P i x e l V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelView() deallocates memory associated with a pixel view.
%
%  The format of the DestroyPixelView method is:
%
%      PixelView *DestroyPixelView(PixelView *pixel_view,
%        const unsigned long number_wands,const unsigned long number_threads)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
%    o number_wand: the number of pixel wands.
%
%    o number_threads: number of threads.
%
*/

/*
  Release each per-thread row of pixel wands, then the table itself; returns
  NULL so callers can clear their pointer in one statement.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const unsigned long number_wands,const unsigned long number_threads)
{
  register long
    n;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (n=0; n < (long) number_threads; n++)
    if (pixel_wands[n] != (PixelWand **) NULL)
      pixel_wands[n]=DestroyPixelWands(pixel_wands[n],number_wands);
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}

WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  /* Tear down members in reverse order of construction. */
  pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
    pixel_view->region.width,pixel_view->number_threads);
  pixel_view->view=DestroyCacheView(pixel_view->view);
  pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
  /* Invalidate the signature before freeing so stale pointers are caught. */
  pixel_view->signature=(~WandSignature);
  RelinquishWandId(pixel_view->id);
  pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferPixelViewIterator() iterates over three pixel views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel region is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.
However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType DuplexTransferPixelViewIterator( PixelView *source,PixelView *duplex,PixelView *destination, DuplexTransferPixelViewMethod transfer,void *context) { #define DuplexTransferPixelViewTag "PixelView/DuplexTransfer" ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; long progress, y; MagickBooleanType status; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) #endif for (y=source->region.y; y < (long) source->region.height; y++) { MagickBooleanType sync; register const IndexPacket *duplex_indexes, *indexes; register const PixelPacket *duplex_pixels, *pixels; register IndexPacket *destination_indexes; register long id, x; 
register PixelPacket *destination_pixels; if (status == MagickFalse) continue; id=GetPixelCacheThreadId(); pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (long) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (long) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]); if (source_image->storage_class == PseudoClass) for (x=0; x < (long) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x],indexes[x]); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y, duplex->region.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (long) duplex->region.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (long) duplex->region.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x],duplex_indexes[x]); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (long) duplex->region.width; x++) PixelSetIndex(duplex->pixel_wands[id][x],duplex_indexes[x]); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (long) destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (long) destination->region.width; x++) 
PixelSetBlackQuantum(destination->pixel_wands[id][x], destination_indexes[x]); if (destination_image->storage_class == PseudoClass) for (x=0; x < (long) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x],destination_indexes[x]); if (transfer(source,duplex,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (long) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (long) destination->region.width; x++) destination_indexes[x]=PixelGetBlackQuantum( destination->pixel_wands[id][x]); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical #endif proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag, progress++,source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewException() returns the severity, reason, and description of any % error that occurs when utilizing a pixel view. % % The format of the GetPixelViewException method is: % % char *GetPixelViewException(const PixelWand *pixel_view, % ExceptionType *severity) % % A description of each parameter follows: % % o pixel_view: the pixel pixel_view. % % o severity: the severity of the error is returned here. 
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  char
    *description;

  const ExceptionInfo
    *view_exception;

  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  view_exception=pixel_view->exception;
  *severity=view_exception->severity;
  /* The caller owns the returned string and is responsible for freeing it. */
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *description='\0';
  if (view_exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      view_exception->severity,view_exception->reason),MaxTextExtent);
  if (view_exception->description != (char *) NULL)
    {
      /* Append the detailed description in parentheses, when present. */
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        view_exception->severity,view_exception->description),MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w H e i g h t                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewHeight() returns the pixel view height.
%
%  The format of the GetPixelViewHeight method is:
%
%      unsigned long GetPixelViewHeight(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport unsigned long GetPixelViewHeight(const PixelView *pixel_view)
{
  const RectangleInfo
    *region;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  /* Height of the viewed region, not of the underlying image canvas. */
  region=(&pixel_view->region);
  return(region->height);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your get method for each scanline of the view.  The pixel region is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetPixelViewIterator method is:
%
%      MagickBooleanType GetPixelViewIterator(PixelView *source,
%        GetPixelViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag  "PixelView/Get"

  Image
    *source_image;

  long
    progress,
    y;

  MagickBooleanType
    status;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
  for (y=source->region.y; y < (long) source->region.height; y++)
  {
    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register long
      id,
      x;

    /* A prior scanline failed; skip the remaining iterations cheaply. */
    if (status == MagickFalse)
      continue;
    /* Each OpenMP thread reads into its own row of the wand table. */
    id=GetPixelCacheThreadId();
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Populate the scanline wands; black channel / colormap index as needed. */
    for (x=0; x < (long) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (long) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (long) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],indexes[x]);
    /* Invoke the user callback; any pixel updates it makes are discarded. */
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l V i e w P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewPixels() returns the pixel view pixel_wands.
%
%  The format of the GetPixelViewPixels method is:
%
%      PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  long
    id;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  /* Each thread owns a private row of pixel wands; return this thread's row. */
  id=GetPixelCacheThreadId();
  return(pixel_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w W a n d                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWand() returns the magick wand associated with the pixel view.
%
%  The format of the GetPixelViewWand method is:
%
%      MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w W i d t h                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWidth() returns the pixel view width.
%
%  The format of the GetPixelViewWidth method is:
%
%      unsigned long GetPixelViewWidth(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport unsigned long GetPixelViewWidth(const PixelView *pixel_view)
{
  /* Trivial accessors: each returns one field of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w X                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewX() returns the pixel view x offset.
%
%  The format of the GetPixelViewX method is:
%
%      long GetPixelViewX(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport long GetPixelViewX(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w Y                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewY() returns the pixel view y offset.
%
%  The format of the GetPixelViewY method is:
%
%      long GetPixelViewY(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport long GetPixelViewY(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s P i x e l V i e w                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPixelView() returns MagickTrue if the parameter is verified as a pixel
%  view container.
%
%  The format of the IsPixelView method is:
%
%      MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  size_t
    length;

  /* Unlike the accessors above, this is a predicate: it never asserts and
    instead reports MagickFalse for anything that is not a signed PixelView
    whose name carries the PixelViewId prefix. */
  if (pixel_view == (const PixelView *) NULL)
    return(MagickFalse);
  if (pixel_view->signature != WandSignature)
    return(MagickFalse);
  length=strlen(PixelViewId);
  if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/* Allocate one row of number_wands pixel wands per thread so that parallel
  iterators can work on private scanline buffers.  On partial failure the
  already-allocated rows are released via DestroyPixelsThreadSet() and NULL
  is returned. */
static PixelWand ***AcquirePixelsThreadSet(const unsigned long number_wands,
  const unsigned long number_threads)
{
  PixelWand
    ***pixel_wands;

  register long
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero the row pointers so a partial-failure cleanup sees NULL rows. */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (long) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      strerror(errno));
  /* Start from a zeroed struct so every unset field has a known value. */
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatMagickString(pixel_view->name,MaxTextExtent,"%s-%lu",
    PixelViewId,pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* The wand must be bound before the cache view is acquired from it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  /* Default region: the full canvas of the wand's current image. */
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetPixelCacheMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      strerror(errno));
  pixel_view->debug=IsEventLogging();
  /* Signature goes in last: the view is only valid once fully constructed. */
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const long x,
%        const long y,const unsigned long width,const unsigned long height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const long x,
  const long y,const unsigned long width,const unsigned long height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      strerror(errno));
  /* Start from a zeroed struct so every unset field has a known value. */
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatMagickString(pixel_view->name,MaxTextExtent,"%s-%lu",
    PixelViewId,pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: bind the wand to the view *before* acquiring the cache view.
    The previous order called AcquireCacheView(pixel_view->wand->images)
    while pixel_view->wand was still NULL (the struct was just zeroed by
    ResetMagickMemory above), dereferencing a null pointer.  This now
    matches the initialization order used by NewPixelView().
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  /* Caller-supplied region; it need not lie inside the canvas. */
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetPixelCacheMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      strerror(errno));
  pixel_view->debug=IsEventLogging();
  /* Signature goes in last: the view is only valid once fully constructed. */
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your set method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
%  undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  long
    progress,
    y;

  MagickBooleanType
    status;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Writing pixels requires DirectClass storage. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
  for (y=destination->region.y; y < (long) destination->region.height; y++)
  {
    MagickBooleanType
      sync;

    register IndexPacket
      *indexes;

    register long
      id,
      x;

    register PixelPacket
      *pixels;

    /* An earlier row failed: skip remaining iterations. */
    if (status == MagickFalse)
      continue;
    id=GetPixelCacheThreadId();
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* The callback fills this thread's pixel wands; the wands start with
      whatever the previous iteration left in them (documented: the pixels
      are initially undefined). */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand colors back into the scanline and sync to the image. */
    for (x=0; x < (long) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (long) destination->region.width; x++)
        indexes[x]=PixelGetBlackQuantum(destination->pixel_wands[id][x]);
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ must be serialized across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferPixelViewIterator method is:
%
%      MagickBooleanType TransferPixelViewIterator(PixelView *source,
%        PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
% */ WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source, PixelView *destination,TransferPixelViewMethod transfer,void *context) { #define TransferPixelViewTag "PixelView/Transfer" ExceptionInfo *exception; Image *destination_image, *source_image; long progress, y; MagickBooleanType status; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) #endif for (y=source->region.y; y < (long) source->region.height; y++) { MagickBooleanType sync; register const IndexPacket *indexes; register const PixelPacket *pixels; register IndexPacket *destination_indexes; register long id, x; register PixelPacket *destination_pixels; if (status == MagickFalse) continue; id=GetPixelCacheThreadId(); pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (long) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (long) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]); if (source_image->storage_class == PseudoClass) for (x=0; x < (long) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x],indexes[x]); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { 
status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (long) destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (long) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x],indexes[x]); if (destination_image->storage_class == PseudoClass) for (x=0; x < (long) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x],indexes[x]); if (transfer(source,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (long) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (long) destination->region.width; x++) destination_indexes[x]=PixelGetBlackQuantum( destination->pixel_wands[id][x]); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical #endif proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++, source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdatePixelViewIterator() iterates over the pixel view in parallel and calls % your update method for each scanline of the view. The pixel region is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. 
Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdatePixelViewIterator method is:
%
%      MagickBooleanType UpdatePixelViewIterator(PixelView *source,
%        UpdatePixelViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  long
    progress,
    y;

  MagickBooleanType
    status;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Updating pixels in place requires DirectClass storage. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status)
#endif
  for (y=source->region.y; y < (long) source->region.height; y++)
  {
    register IndexPacket
      *indexes;

    register long
      id,
      x;

    register PixelPacket
      *pixels;

    /* An earlier row failed: skip remaining iterations. */
    if (status == MagickFalse)
      continue;
    id=GetPixelCacheThreadId();
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    /* Load the scanline into this thread's wands, let the callback modify
      them, then write the wands back and sync the cache view. */
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    for (x=0; x < (long) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (long) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],indexes[x]);
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (long) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (long) source->region.width; x++)
        indexes[x]=PixelGetBlackQuantum(source->pixel_wands[id][x]);
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ must be serialized across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
GB_unop__identity_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_fp64)
// op(A') function:  GB (_unop_tran__identity_bool_fp64)

// C type:   bool
// A type:   double
// cast:     bool cij = (aij != 0)
// unaryop:  cij = aij

// Since the unary op is IDENTITY, the only real work is the fp64-to-bool
// typecast: cij is true iff aij is nonzero (note that NaN != 0 is true).

#define GB_ATYPE \
    double

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = (aij != 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (aij != 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_bool_fp64)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            bool z = (aij != 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            bool z = (aij != 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_bool_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is shared; it uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ast-dump-openmp-barrier.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(void) { #pragma omp barrier } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-barrier.c:3:1, line:5:1> line:3:6 test 'void (void)' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:5:1> // CHECK-NEXT: `-OMPBarrierDirective {{.*}} <line:4:1, col:20> openmp_standalone_directive
GB_unaryop__minv_uint32_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint32_fp32
// op(A') function:  GB_tran__minv_uint32_fp32

// C type:   uint32_t
// A type:   float
// cast:     uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

// Each entry is first cast from float to uint32_t, then replaced with its
// unsigned 32-bit multiplicative inverse (GB_IMINV_UNSIGNED).

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint32_fp32
(
    uint32_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is shared; it uses the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
NDArray.h
#ifndef NDARRAY_H #define NDARRAY_H #include <initializer_list> #include <functional> #include <shape.h> #include "NativeOpExcutioner.h" #include <memory/Workspace.h> #include <indexing/NDIndex.h> #include <indexing/IndicesList.h> #include <graph/Intervals.h> #include <array/DataType.h> #include <stdint.h> #include <array/ArrayOptions.h> #include <array/ArrayType.h> namespace nd4j { template<typename T> class ND4J_EXPORT NDArray; ND4J_EXPORT NDArray<float> operator-(const float, const NDArray<float>&); ND4J_EXPORT NDArray<float16> operator-(const float16, const NDArray<float16>&); ND4J_EXPORT NDArray<double> operator-(const double, const NDArray<double>&); ND4J_EXPORT NDArray<float> operator+(const float, const NDArray<float>&); ND4J_EXPORT NDArray<float16> operator+(const float16, const NDArray<float16>&); ND4J_EXPORT NDArray<double> operator+(const double, const NDArray<double>&); template<typename T> NDArray<T> mmul(const NDArray<T>&, const NDArray<T>&); template<typename T> class NDArray { protected: /** * if true then array doesn't own buffer and simply points to another's buffer */ bool _isView = false; /** * pointer on flattened data array in memory */ T *_buffer = nullptr; /** * contains shape info: matrix rank, numbers of elements per each dimension, dimensions strides, element-wise-stride, c-like or fortan-like order */ Nd4jLong *_shapeInfo = nullptr; /** * pointer on externally allocated memory where _buffer and _shapeInfo are stored */ nd4j::memory::Workspace* _workspace = nullptr; /** * alternative buffers for special computational devices (like GPUs for CUDA) */ T* _bufferD = nullptr; Nd4jLong *_shapeInfoD = nullptr; /** * indicates whether user allocates memory for _buffer/_shapeInfo by himself, in opposite case the memory must be allocated from outside */ bool _isShapeAlloc = false; bool _isBuffAlloc = false; /** * Field to store cached length */ Nd4jLong _length = -1L; /** * type of array elements */ DataType _dataType = DataType_FLOAT; std::string 
toStringValue(T value); public: static NDArray<T>* createEmpty(nd4j::memory::Workspace* workspace = nullptr); /** * default constructor, do not allocate memory, memory for array is passed from outside */ NDArray(T *buffer = nullptr, Nd4jLong* shapeInfo = nullptr, nd4j::memory::Workspace* workspace = nullptr); NDArray(std::initializer_list<Nd4jLong> shape, nd4j::memory::Workspace* workspace = nullptr); /** * Constructor for scalar NDArray */ NDArray(T scalar); /** * copy constructor */ NDArray(const NDArray<T>& other); /** * move constructor */ NDArray(NDArray<T>&& other) noexcept; #ifndef __JAVACPP_HACK__ // this method only available out of javacpp /** * This constructor creates vector of T * * @param values */ NDArray(std::initializer_list<T> values, nd4j::memory::Workspace* workspace = nullptr); NDArray(std::vector<T> &values, nd4j::memory::Workspace* workspace = nullptr); #endif /** * constructor, create empty array stored at given workspace */ NDArray(nd4j::memory::Workspace* workspace); /** * this constructor creates new NDArray with shape matching "other" array, do not copy "other" elements into new array */ NDArray(const NDArray<T> *other, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr); /** * constructor creates new NDArray using shape information from "shapeInfo", set all elements in new array to be zeros, if copyStrides is true then use stride values from "shapeInfo", else calculate strides independently */ NDArray(const Nd4jLong* shapeInfo, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr); /** * this constructor creates new array using shape information contained in vector argument */ NDArray(const char order, const std::vector<Nd4jLong> &shape, nd4j::memory::Workspace* workspace = nullptr); /** * This constructor creates new array with elements copied from data and using shape information stored in shape * * PLEASE NOTE: data will be copied AS IS, without respect to specified order. 
You must ensure order match here. */ NDArray(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::memory::Workspace* workspace = nullptr); /** * this constructor creates new array using given buffer (without memory allocating) and shape information stored in shape */ NDArray(T *buffer, const char order, const std::vector<Nd4jLong> &shape , nd4j::memory::Workspace* workspace = nullptr); /** * copy assignment operator */ NDArray<T>& operator=(const NDArray<T>& other); /** * move assignment operator */ NDArray<T>& operator=(NDArray<T>&& other) noexcept; /** * assignment operator, assigns the same scalar to all array elements */ NDArray<T>& operator=(const T scalar); /** * operators for memory allocation and deletion */ void* operator new(size_t i); void operator delete(void* p); /** * method replaces existing buffer/shapeinfo, AND releases original pointers (if releaseExisting TRUE) */ void replacePointers(T *buffer, Nd4jLong *shapeInfo, const bool releaseExisting = true); /** * create a new array by replicating current array by repeats times along given dimension * dimension - dimension along which to repeat elements * repeats - number of repetitions */ NDArray<T>* repeat(int dimension, const std::vector<Nd4jLong>& repeats) const; /** * fill target array by repeating current array * dimension - dimension along which to repeat elements */ void repeat(int dimension, NDArray<T>& target) const; /** * return _dataType; */ DataType dataType() const; /** * creates array which is view of this array */ NDArray<T>* getView(); /** * creates array which points on certain sub-range of this array, sub-range is defined by given indices */ NDArray<T> *subarray(IndicesList& indices) const; NDArray<T> *subarray(IndicesList& indices, std::vector<Nd4jLong>& strides) const; NDArray<T>* subarray(const std::initializer_list<NDIndex*>& idx) const; NDArray<T>* subarray(const Intervals& idx) const; /** * cast array elements to given dtype */ NDArray<T>* 
cast(DataType dtype); void cast(NDArray<T>* target, DataType dtype); /** * returns _workspace */ nd4j::memory::Workspace* getWorkspace() const { return _workspace; } /** * returns _buffer */ T* getBuffer() const; T* buffer(); /** * returns _shapeInfo */ Nd4jLong* shapeInfo(); Nd4jLong* getShapeInfo() const; /** * if _bufferD==nullptr return _buffer, else return _bufferD */ T* specialBuffer(); /** * Returns True if it's legally empty NDArray, or false otherwise * @return */ FORCEINLINE bool isEmpty() const; /** * if _shapeInfoD==nullptr return _shapeInfo, else return _shapeInfoD */ Nd4jLong* specialShapeInfo(); /** * set values for _bufferD and _shapeInfoD */ void setSpecialBuffers(T * buffer, Nd4jLong *shape); /** * permutes (in-place) the dimensions in array according to "dimensions" array */ bool permutei(const std::initializer_list<int>& dimensions); bool permutei(const std::vector<int>& dimensions); bool permutei(const int* dimensions, const int rank); bool permutei(const std::initializer_list<Nd4jLong>& dimensions); bool permutei(const std::vector<Nd4jLong>& dimensions); bool permutei(const Nd4jLong* dimensions, const int rank); bool isFinite(); bool hasNaNs(); bool hasInfs(); /** * permutes the dimensions in array according to "dimensions" array, new array points on _buffer of this array */ NDArray<T>* permute(const std::initializer_list<int>& dimensions) const; NDArray<T>* permute(const std::vector<int>& dimensions) const; NDArray<T>* permute(const int* dimensions, const int rank) const; void permute(const int* dimensions, const int rank, NDArray<T>& target) const; void permute(const std::vector<int>& dimensions, NDArray<T>& target) const; NDArray<T>* permute(const std::initializer_list<Nd4jLong>& dimensions) const; NDArray<T>* permute(const std::vector<Nd4jLong>& dimensions) const; NDArray<T>* permute(const Nd4jLong* dimensions, const int rank) const; void permute(const Nd4jLong* dimensions, const int rank, NDArray<T>& target) const; void permute(const 
std::vector<Nd4jLong>& dimensions, NDArray<T>& target) const; /** * This method streamlines given view or permuted array, and reallocates buffer */ void streamline(char order = 'a'); /** * check whether array is contiguous in memory */ bool isContiguous(); /** * prints information about array shape * msg - message to print out */ void printShapeInfo(const char * msg = nullptr) const; /** * prints buffer elements * msg - message to print out * limit - number of array elements to print out */ void printBuffer(const char* msg = nullptr, Nd4jLong limit = -1); /** * prints buffer elements, takes into account offset between elements (element-wise-stride) * msg - message to print out * limit - number of array elements to print out */ void printIndexedBuffer(const char* msg = nullptr, Nd4jLong limit = -1) const; std::string asIndexedString(Nd4jLong limit = -1); std::string asString(Nd4jLong limit = -1); /** * this method assigns values of given array to this one */ void assign(const NDArray<T>* other); /** * this method assigns values of given array to this one */ void assign(const NDArray<T>& other); /** * this method assigns given value to all elements in array */ void assign(const T value); /** * returns new copy of this array, optionally in different order */ NDArray<T> *dup(const char newOrder = 'a'); /** * returns sum of all elements of array */ T sumNumber() const; /** * returns mean number of array */ T meanNumber() const; /** * This method explicitly enforces new shape for this NDArray, old shape/stride information is lost */ void enforce(const std::initializer_list<Nd4jLong> &dimensions, char order = 'a'); void enforce(std::vector<Nd4jLong> &dimensions, char order = 'a'); /** * calculates sum along dimension(s) in this array and save it to created reduced array * dimensions - array of dimensions to calculate sum over * keepDims - if true then put unities in place of reduced dimensions */ NDArray<T> *sum(const std::vector<int> &dimensions) const; /** * method 
reduces array by excluding its shapes along dimensions present in given dimensions vector, result is stored in new array to be returned * dimensions - array of dimensions to reduce along * keepDims - if true then put unities in place of reduced dimensions */ template<typename OpName> NDArray<T>* reduceAlongDimension(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; template<typename OpName> NDArray<T>* reduceAlongDimension(const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; template<typename OpName> NDArray<T> reduceAlongDims(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; /** * method reduces array by excluding its shapes along dimensions present in given dimensions vector * target - where to save result of reducing * dimensions - array of dimensions to reduce along * keepDims - if true then put unities in place of reduced dimensions * extras - extra parameters */ template<typename OpName> void reduceAlongDimension(NDArray<T>* target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, T *extras = nullptr) const; /** * return variance of array elements set * biasCorrected - if true bias correction will be applied */ template<typename OpName> T varianceNumber(bool biasCorrected = true); /** * apply scalar operation to array * extraParams - extra parameters for operation */ template<typename OpName> T reduceNumber(T *extraParams = nullptr) const; /** * returns element index which corresponds to some condition imposed by operation * extraParams - extra parameters for operation */ template<typename OpName> Nd4jLong indexReduceNumber(T *extraParams = nullptr); /** * returns index of max element in a given array (optionally: along given dimension(s)) * dimensions - optional vector with dimensions */ Nd4jLong 
argMax(std::initializer_list<int> dimensions = {}); /** * apply OpName transformation directly to array * extraParams - extra parameters for operation */ template<typename OpName> void applyTransform(T *extraParams = nullptr); /** * apply OpName transformation to array and store result in target * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyTransform(NDArray<T> *target, T *extraParams = nullptr); /** * apply OpName transformation to this array and store result in new array being returned * extraParams - extra parameters for operation */ template<typename OpName> NDArray<T> transform(T *extraParams = nullptr) const; /** * apply pairwise OpName transformation based on "this" and "other" arras elements, store result in this array * other - second array necessary for pairwise operation * extraParams - extra parameters for operation */ template<typename OpName> void applyPairwiseTransform(NDArray<T> *other, T *extraParams); /** * apply pairwise OpName transformation based on "this" and "other" arras elements, store result in target array * other - second array necessary for pairwise operation * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyPairwiseTransform(NDArray<T> *other, NDArray<T> *target, T *extraParams); /** * apply operation which requires broadcasting, broadcast a smaller array (tad) along bigger one (this) * tad - array to broadcast * dimensions - dimensions array to broadcast along * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyBroadcast(std::initializer_list<int> dimensions, const NDArray<T>* tad, NDArray<T>* target = nullptr, T* extraArgs = nullptr); template <typename OpName> void applyBroadcast(std::vector<int> &dimensions, const NDArray<T> *tad, NDArray<T> *target = nullptr, T *extraArgs = nullptr); /** * apply operation which requires 
broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting * other - input array * extraParams - extra parameters for operation */ template <typename OpName> NDArray<T> applyTrueBroadcast(const NDArray<T>& other, T *extraArgs = nullptr) const; template <typename OpName> NDArray<T>* applyTrueBroadcast(const NDArray<T>* other, T *extraArgs = nullptr) const; /** * apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting * other - input array * target - where to store result * checkTargetShape - if true check whether target shape is suitable for broadcasting * extraParams - extra parameters for operation */ template <typename OpName> void applyTrueBroadcast(const NDArray<T>* other, NDArray<T>* target, const bool checkTargetShape = true, T *extraArgs = nullptr) const; /** * apply a scalar operation to an array * scalar - input scalar * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyScalar(T scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const; /** * apply a scalar operation to an array * scalar - input array which is simple scalar * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyScalar(NDArray<T>& scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const; #ifndef __JAVACPP_HACK__ /** * apply operation "func" to an array * func - what operation to apply * target - where to store result */ void applyLambda(const std::function<T(T)>& func, NDArray<T>* target = nullptr); void applyIndexedLambda(const std::function<T(Nd4jLong, T)>& func, NDArray<T>* target = nullptr); /** * apply pairwise operation "func" to an array * other - input array * func - what pairwise operation to apply * target - where to store result */ void applyPairwiseLambda(NDArray<T>* other, const std::function<T(T, 
T)>& func, NDArray<T>* target = nullptr); void applyIndexedPairwiseLambda(NDArray<T>* other, const std::function<T(Nd4jLong, T, T)>& func, NDArray<T>* target = nullptr); void applyTriplewiseLambda(NDArray<T>* second, NDArray<T> *third, const std::function<T(T, T, T)>& func, NDArray<T>* target = nullptr); #endif /** * apply OpName random operation to array * buffer - pointer on RandomBuffer * y - optional input array * z - optional input array * extraArgs - extra parameters for operation */ template<typename OpName> void applyRandom(nd4j::random::RandomBuffer *buffer, NDArray<T>* y = nullptr, NDArray<T>* z = nullptr, T* extraArgs = nullptr); /** * apply transpose operation to the copy of this array, that is this array remains unaffected */ NDArray<T> *transpose() const; /** * perform transpose operation and store result in target, this array remains unaffected * target - where to store result */ void transpose(NDArray<T>& target) const; /** * apply in-place transpose operation to this array, so this array becomes transposed */ void transposei(); /** * return array pointing on certain range of this array * index - the number of array to be returned among set of possible arrays * dimensions - array of dimensions to point on */ NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::initializer_list<int>& dimensions) const; NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::vector<int>& dimensions) const; /** * returns the number of arrays pointing on specified dimension(s) * dimensions - array of dimensions to point on */ Nd4jLong tensorsAlongDimension(const std::initializer_list<int> dimensions) const ; Nd4jLong tensorsAlongDimension(const std::vector<int>& dimensions) const ; /** * returns true if elements of two arrays are equal to within given epsilon value * other - input array to compare * eps - epsilon, this value defines the precision of elements comparison */ bool equalsTo(const NDArray<T> *other, T eps = (T) 1e-5f) const; bool 
equalsTo(NDArray<T> &other, T eps = (T) 1e-5f) const; /** * add given row vector to all rows of this array * row - row vector to add */ void addiRowVector(const NDArray<T> *row); /** * add given row vector to all rows of this array, store result in target * row - row vector to add * target - where to store result */ void addRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * subtract given row vector from all rows of this array, store result in target * row - row vector to subtract * target - where to store result */ void subRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * multiply all rows of this array on given row vector, store result in target * row - row vector to multiply on * target - where to store result */ void mulRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * divide all rows of this array on given row vector, store result in target * row - row vector to divide on * target - where to store result */ void divRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * add given column vector to all columns of this array, store result in target * column - column vector to add * target - where to store result */ void addColumnVector(const NDArray<T> *column, NDArray<T>* target) const; /** * add given column vector to all columns of this array, this array becomes affected (in-place operation) * column - column vector to add */ void addiColumnVector(const NDArray<T> *column); /** * multiply all columns of this array on given column vector, this array becomes affected (in-place operation) * column - column vector to multiply on */ void muliColumnVector(const NDArray<T> *column); /** * returns number of bytes used by _buffer & _shapeInfo */ Nd4jLong memoryFootprint(); /** * these methods suited for FlatBuffers use */ std::vector<T> getBufferAsVector(); std::vector<Nd4jLong> getShapeAsVector(); std::vector<Nd4jLong> getShapeInfoAsVector(); std::vector<int64_t> getShapeInfoAsFlatVector(); /** * set new order 
and shape in case of suitable array length (in-place operation) * order - order to set * shape - shape to set * * if there was permute applied before or there are weird strides, then new buffer is allocated for array */ bool reshapei(const char order, const std::initializer_list<Nd4jLong>& shape); bool reshapei(const char order, const std::vector<Nd4jLong>& shape); bool reshapei(const std::initializer_list<Nd4jLong>& shape); bool reshapei(const std::vector<Nd4jLong>& shape); /** * creates new array with corresponding order and shape, new array will point on _buffer of this array * order - order to set * shape - shape to set * * if permute have been applied before or there are weird strides, then new buffer is allocated for new array */ NDArray<T>* reshape(const char order, const std::vector<Nd4jLong>& shape) const; /** * calculate strides and set given order * order - order to set */ void updateStrides(const char order); /** * change an array by repeating it the number of times given by reps (in-place operation) * repeats - contains numbers of repetitions */ void tilei(const std::vector<Nd4jLong>& repeats); /** * returns new array which is created by by repeating of this array the number of times given by reps * repeats - contains numbers of repetitions */ NDArray<T> tile(const std::vector<Nd4jLong>& repeats) const; /** * change an array by repeating it the number of times given by reps (in-place operation) * repeats - contains numbers of repetitions * target - where to store result */ void tile(const std::vector<Nd4jLong>& repeats, NDArray<T>& target) const; /** * change an array by repeating it the number of times to acquire the new shape which is the same as target shape * target - where to store result */ void tile(NDArray<T>& target) const; /** * returns an array which is result of broadcasting of this and other arrays * other - input array */ NDArray<T>* broadcast(const NDArray<T>& other); /** * check whether array's rows (arg=0) or columns (arg=1) create 
orthogonal basis * arg - 0 -> row, 1 -> column */ bool hasOrthonormalBasis(const int arg); /** * check whether array is identity matrix */ bool isIdentityMatrix(); /** * check whether array is unitary matrix */ bool isUnitary(); /** * reduces dimensions in this array relying on index operation OpName * dimensions - vector of dimensions to reduce along * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyIndexReduce(const std::vector<int>& dimensions, const T *extraParams = nullptr) const; /** * reduces dimensions in array relying on index operation OpName * target - where to store result * dimensions - vector of dimensions to reduce along * extraArgs - extra parameters for operation */ template<typename OpName> void applyIndexReduce(const NDArray<T>* target, const std::vector<int>& dimensions, const T *extraParams = nullptr) const; /** * apply reduce3 operation OpName to this and other array, return result in new output array * other - input array * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyReduce3(const NDArray<T>* other, const T* extraParams = nullptr) const; /** * apply reduce3 operation OpName to this and other array, return result in new output array * other - input array * dimensions - vector of dimensions to reduce along (tads not axis) * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyAllReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, const T* extraParams = nullptr) const; /** * apply reduce3 (exec) operation OpName to this and other array, return result in new output array * other - input array * dimensions - vector of dimensions to reduce along (same as reduceAlongDimension) * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, const T* extraParams = nullptr) const; /** * returns variance along given 
dimensions * biasCorrected - if true bias correction will be applied * dimensions - vector of dimensions to calculate variance along */ template<typename OpName> NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::vector<int>& dimensions) const; template<typename OpName> NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::initializer_list<int>& dimensions) const; template<typename OpName> void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::vector<int>& dimensions); template<typename OpName> void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::initializer_list<int>& dimensions); /** * operator returns sub-array with buffer pointing at this->_buffer with offset defined by given intervals * idx - intervals of indexes which define the sub-arrays to point on * keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b} */ NDArray<T> operator()(const Intervals& idx, bool keepUnitiesInShape = false) const; /** * operator returns sub-array with buffer pointing at this->_buffer with offset defined by given intervals * idx - intervals of indexes which define the sub-arrays to point on, idx has form {dim0Start,dim0End, dim1Start,dim1End, ....} and length (2 * this->rankOf()) * when (dimStart == dimEnd) then whole range will be used for current dimension * keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b} */ NDArray<T> operator()(const int* idx, bool keepUnitiesInShape = false) const; /** * addition operator: array + other * other - input array to add */ NDArray<T> operator+(const NDArray<T>& other) const; /** * addition operator: array + scalar * scalar - input scalar to add */ NDArray<T> operator+(const T scalar) const; /** * friend functions which implement addition operator: scalar + array * scalar - input scalar to add */ friend NDArray<float> 
nd4j::operator+(const float scalar, const NDArray<float>& arr); friend NDArray<float16> nd4j::operator+(const float16 scalar, const NDArray<float16>& arr); friend NDArray<double> nd4j::operator+(const double scalar, const NDArray<double>& arr); /** * addition unary operator array += other * other - input array to add */ void operator+=(const NDArray<T>& other); /** * subtraction unary operator array -= other * other - input array to add */ void operator-=(const NDArray<T>& other); void operator+=(const T other); void operator-=(const T other); /** * subtraction operator: array - other * other - input array to subtract */ NDArray<T> operator-(const NDArray<T>& other) const; /** * subtraction operator: array - scalar * scalar - input scalar to subtract */ NDArray<T> operator-(const T& scalar) const; /** * negative operator, it changes sign of all array elements on opposite */ NDArray<T> operator-() const; /** * friend functions which implement subtraction operator: scalar - array * scalar - input scalar to subtract */ friend NDArray<float> nd4j::operator-(const float scalar, const NDArray<float>& arr); friend NDArray<float16> nd4j::operator-(const float16 scalar, const NDArray<float16>& arr); friend NDArray<double> nd4j::operator-(const double scalar, const NDArray<double>& arr); /** * pairwise multiplication operator: array * other * other - input array to multiply on */ NDArray<T> operator*(const NDArray<T>& other) const; /** * multiplication operator: array * scalar * scalar - input scalar to multiply on */ NDArray<T> operator*(const T scalar) const; /** * pairwise multiplication unary operator array *= other * other - input array to multiply on */ void operator*=(const NDArray<T>& other); /** * multiplication unary operator array *= scalar * scalar - input scalar to multiply on */ void operator*=(const T scalar); /** * pairwise division operator: array / other * other - input array to divide on */ NDArray<T> operator/(const NDArray<T>& other) const; /** * 
division operator: array / scalar * scalar - input scalar to divide each array element on */ NDArray<T> operator/(const T scalar) const; /** * pairwise division unary operator: array /= other * other - input array to divide on */ void operator/=(const NDArray<T>& other); /** * division unary operator: array /= scalar * scalar - input scalar to divide on */ void operator/=(const T scalar); /** * friend function which implements mathematical multiplication of two arrays * left - input array * right - input array */ friend NDArray<T> mmul<>(const NDArray<T>& left, const NDArray<T>& right); /** * this method assigns elements of other array to the sub-array of this array defined by given intervals * other - input array to assign elements from * idx - intervals of indexes which define the sub-array */ void assign(const NDArray<T>& other, const Intervals& idx); /** * return vector containing _buffer as flat binary array */ std::vector<int8_t> asByteVector(); /** * makes array to be identity matrix (not necessarily square), that is set all diagonal elements = 1, rest = 0 */ void setIdentity(); /** * swaps the contents of tow arrays, * PLEASE NOTE: method doesn't take into account the shapes of arrays, shapes may be different except one condition: arrays lengths must be the same */ void swapUnsafe(NDArray<T>& other); /** * return vector with buffer which points on corresponding diagonal elements of array * type - means of vector to be returned: column ('c') or row ('r') */ NDArray<T>* diagonal(const char type ) const; /** * fill matrix with given value starting from specified diagonal in given direction, works only with 2D matrix * * diag - diagonal starting from matrix is filled. * diag = 0 corresponds to main diagonal, * diag < 0 below main diagonal * diag > 0 above main diagonal * direction - in what direction to fill matrix. 
There are 2 possible directions: * 'u' - fill up, mathematically this corresponds to lower triangular matrix * 'l' - fill down, mathematically this corresponds to upper triangular matrix */ void setValueInDiagMatrix(const T& value, const int diag, const char direction); /** * change an array by repeating it the number of times in order to acquire new shape equal to the input shape * * shape - contains new shape to broadcast array to * target - optional argument, if target != nullptr the resulting array will be placed it target, in opposite case tile operation is done in place */ void tileToShape(const std::vector<Nd4jLong>& shape, NDArray<T>* target = nullptr); void tileToShape(const std::initializer_list<Nd4jLong>& shape, NDArray<T>* target = nullptr); template <typename N> NDArray<N>* asT(); /** * calculates the trace of an array, that is sum of elements on main diagonal = sum array[i, i, i, ...] */ T getTrace() const; /** * default destructor */ ~NDArray() noexcept; /** * set _shapeInfo */ FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo); /** * set _buffer */ FORCEINLINE void setBuffer(T* buffer); /** * set _isBuffAlloc and _isShapeAlloc */ FORCEINLINE void triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated); /** * returns the value of "dim" dimension */ Nd4jLong sizeAt(int dim) const; /** * returns order of array */ FORCEINLINE char ordering() const; /** * return _isView */ FORCEINLINE bool isView(); /** * returns shape portion of shapeInfo */ FORCEINLINE Nd4jLong* shapeOf() const; /** * returns strides portion of shapeInfo */ FORCEINLINE Nd4jLong* stridesOf() const; /** * returns rank of array */ FORCEINLINE int rankOf() const; /** * returns length of array */ FORCEINLINE Nd4jLong lengthOf() const; /** * returns number of rows in array */ FORCEINLINE Nd4jLong rows() const; /** * returns number of columns in array */ FORCEINLINE Nd4jLong columns() const; /** * returns size of array elements type */ FORCEINLINE int sizeOfT() const; /** * returns 
element-wise-stride */ FORCEINLINE Nd4jLong ews() const; // returns true if arrays have same shape FORCEINLINE bool isSameShape(const NDArray<T> *other) const; FORCEINLINE bool isSameShape(NDArray<T> &other) const; FORCEINLINE bool isSameShape(const std::initializer_list<Nd4jLong>& shape) const; FORCEINLINE bool isSameShape(const std::vector<Nd4jLong>& shape) const; /** * returns true if these two NDArrays have same rank, dimensions, strides, ews and order */ FORCEINLINE bool isSameShapeStrict(const NDArray<T> *other) const; /** * returns true if buffer && shapeInfo were defined (non nullptr) */ FORCEINLINE bool nonNull() const; /** * returns array element with given index from linear buffer * i - element index in array */ FORCEINLINE T getScalar(const Nd4jLong i) const; /** * returns array element with given index, takes into account offset between elements (element-wise-stride) * i - element index in array */ FORCEINLINE T getIndexedScalar(const Nd4jLong i) const; /** * returns element with given indexes from 2D array * i - number of row * j - number of column */ FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j) const; /** * returns element with given indexes from 3D array * i - height * j - width * k - depth */ FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const; /** * assigns given scalar to array element by given index, takes into account offset between elements (element-wise-stride) * i - element index in array * value - scalar value to assign */ FORCEINLINE void putIndexedScalar(const Nd4jLong i, const T value); /** * assigns given scalar to array element by given index, regards array buffer as linear * i - element index in array * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const T value); /** * assigns given scalar to 2D array element by given indexes * i - number of row * j - number of row * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const 
Nd4jLong j, const T value); /** * assigns given scalar to 3D array element by given indexes * i - height * j - width * k - depth * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value); /** * returns true if array is 2D */ FORCEINLINE bool isMatrix() const; /** * returns true if array is vector */ FORCEINLINE bool isVector() const; /** * returns true if array is column vector */ FORCEINLINE bool isColumnVector() const; /** * returns true if array is row vector */ FORCEINLINE bool isRowVector() const; /** * returns true if array is scalar */ FORCEINLINE bool isScalar() const; /** * inline accessing operator for matrix, i - absolute index */ FORCEINLINE T operator()(const Nd4jLong i) const; /** * inline modifying operator for matrix, i - absolute index */ FORCEINLINE T& operator()(const Nd4jLong i); /** * inline accessing operator for 2D array, i - row, j - column */ FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j) const; /** * inline modifying operator for 2D array, i - row, j - column */ FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j); /** * inline accessing operator for 3D array, i - height, j - width, k - depth */ FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const; /** * inline modifying operator for 3D array, i - height, j - width, k - depth */ FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k); /** * inline modifying operator for 4D array, i - height, j - width, k - depth */ FORCEINLINE T& operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w); /** * inline accessing operator for 4D array, i - height, j - width, k - depth */ FORCEINLINE T operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const; template <typename T2> FORCEINLINE std::vector<T2> asVectorT(); FORCEINLINE bool isAttached(); NDArray<T>* detach(); FORCEINLINE bool operator == 
(const NDArray<T> &other) const; }; ////////////////////////////////////////////////////////////////////////// ///// IMLEMENTATION OF INLINE METHODS ///// ////////////////////////////////////////////////////////////////////////// template <typename T> template <typename T2> std::vector<T2> NDArray<T>::asVectorT() { std::vector<T2> result(this->lengthOf()); #pragma omp parallel for simd for (int e = 0; e < this->lengthOf(); e++) result[e] = static_cast<T2>(this->getIndexedScalar(e)); return result; } template<typename T> bool NDArray<T>::isAttached() { return this->_workspace != nullptr; } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::setShapeInfo(Nd4jLong *shapeInfo) { if(_isShapeAlloc && _workspace == nullptr) delete []_shapeInfo; _shapeInfo = shapeInfo; _isShapeAlloc = false; if (shapeInfo != nullptr) this->_length = shape::length(shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::setBuffer(T* buffer) { if(_isBuffAlloc && _workspace == nullptr) delete []_buffer; _buffer = buffer; _isBuffAlloc = false; } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated) { _isBuffAlloc = bufferAllocated; _isShapeAlloc = shapeAllocated; } ////////////////////////////////////////////////////////////////////////// template<typename T> char NDArray<T>::ordering() const { return shape::order(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isView() { return _isView; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong* NDArray<T>::shapeOf() const { return shape::shapeOf(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong* 
NDArray<T>::stridesOf() const { return shape::stride(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> int NDArray<T>::rankOf() const { if (isEmpty()) return 0; return shape::rank(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::lengthOf() const { return _length; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::rows() const { if (this->rankOf() == 1) return 1; if (this->rankOf() > 2) throw std::runtime_error("Array with rank > 2 can't have rows"); return shapeOf()[0]; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::columns() const { if (this->rankOf() == 1) return this->lengthOf(); if (this->rankOf() > 2) throw std::runtime_error("Array with rank > 2 can't have columns"); return shapeOf()[1]; } ////////////////////////////////////////////////////////////////////////// template<typename T> int NDArray<T>::sizeOfT() const { return sizeof(T); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::ews() const { if (this->isEmpty() || this->rankOf() == 0) return 1; return shape::elementWiseStride(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::nonNull() const { if (isEmpty()) return true; return this->_buffer != nullptr && this->_shapeInfo != nullptr; } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isMatrix() const { if (isEmpty()) return false; return shape::isMatrix(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isVector() const { if (isEmpty()) return false; return !isScalar() && 
shape::isVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isColumnVector() const { if (isEmpty()) return false; return !isScalar() && shape::isColumnVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isRowVector() const { if (isEmpty()) return false; // 1D edge case if (shape::rank(this->_shapeInfo) == 1) return true; return !isScalar() && shape::isRowVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isScalar() const { return shape::isScalar(this->_shapeInfo); } // accessing operator for matrix, i - absolute index template<typename T> T NDArray<T>::operator()(const Nd4jLong i) const { if (i >= shape::length(_shapeInfo)) throw std::invalid_argument("NDArray::operator(i): dinput index is out of array length !"); auto ews = shape::elementWiseStride(_shapeInfo); char order = ordering(); if(ews == 1 && order == 'c') return _buffer[i]; else if(ews > 1 && order == 'c') return _buffer[i*ews]; else { Nd4jLong idx[MAX_RANK]; shape::ind2subC(rankOf(), shapeOf(), i, idx); Nd4jLong offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf()); return _buffer[offset]; } } ////////////////////////////////////////////////////////////////////////// // modifying operator for matrix, i - absolute index template<typename T> T& NDArray<T>::operator()(const Nd4jLong i) { if (i >= shape::length(_shapeInfo)) throw std::invalid_argument("NDArray::operator(i): input index is out of array length !"); auto ews = shape::elementWiseStride(_shapeInfo); auto order = ordering(); if(ews == 1 && order == 'c') return _buffer[i]; else if(ews > 1 && order == 'c') return _buffer[i*ews]; else { Nd4jLong idx[MAX_RANK]; shape::ind2subC(rankOf(), shapeOf(), i, idx); auto offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, 
rankOf()); return _buffer[offset]; } } ////////////////////////////////////////////////////////////////////////// // accessing operator for 2D matrix, i - row, j - column template<typename T> T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) const { if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1]) throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !"); Nd4jLong coords[2] = {i, j}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // modifying operator for 2D matrix, i - row, j - column template<typename T> T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) { if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1]) throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !"); Nd4jLong coords[2] = {i, j}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // accessing operator for 3D array, i - row, j - column template<typename T> T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const { if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || j >= shapeOf()[2]) throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !"); Nd4jLong coords[3] = {i, j, k}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // modifying operator for 3D array template<typename T> T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) { if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || k >= shapeOf()[2]) throw 
std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !"); Nd4jLong coords[3] = {i, j, k}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } template<typename T> T NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const { if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3]) throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !"); Nd4jLong coords[4] = {t, u, v, w}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } template<typename T> T& NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) { if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3]) throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !"); Nd4jLong coords[4] = {t, u, v, w}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // Return value from linear buffer template<typename T> T NDArray<T>::getScalar(const Nd4jLong i) const { return (*this)(i); } ////////////////////////////////////////////////////////////////////////// template<typename T> T NDArray<T>::getIndexedScalar(const Nd4jLong i) const { return (*this)(i); } ////////////////////////////////////////////////////////////////////////// // Returns value from 2D matrix by coordinates/indexes template<typename T> T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j) const { return (*this)(i, j); } ////////////////////////////////////////////////////////////////////////// // returns value from 3D tensor by coordinates template<typename T> T 
NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const { return (*this)(i, j, k); } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::putIndexedScalar(const Nd4jLong i, const T value) { (*this)(i) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in linear buffer to position i template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const T value) { (*this)(i) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in 2D matrix to position i, j template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const T value) { (*this)(i,j) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in 3D matrix to position i,j,k template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value) { (*this)(i,j,k) = value; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::memoryFootprint() { Nd4jLong size = this->lengthOf() * this->sizeOfT(); size += shape::shapeInfoByteLength(this->rankOf()); return size; } ////////////////////////////////////////////////////////////////////////// // returns true if these two NDArrays have same shape // still the definition of inline function must be in header file template<typename T> bool NDArray<T>::isSameShape(const std::vector<Nd4jLong>& other) const{ if (this->rankOf() != (int) other.size()) return false; for (int e = 0; e < this->rankOf(); e++) { if (this->shapeOf()[e] != other.at(e) && other.at(e) != -1) return false; } return true; } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(const NDArray<T> *other) const { if (this->isEmpty() != 
other->isEmpty()) return false; return isSameShape(std::vector<Nd4jLong>(other->_shapeInfo+1, other->_shapeInfo+1+other->_shapeInfo[0])); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(NDArray<T> &other) const { return isSameShape(&other); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(const std::initializer_list<Nd4jLong>& other) const { return isSameShape(std::vector<Nd4jLong>(other)); } ////////////////////////////////////////////////////////////////////////// // returns true if these two NDArrays have same _shapeInfo // still the definition of inline function must be in header file template<typename T> bool NDArray<T>::isSameShapeStrict(const NDArray<T> *other) const { return shape::equalsStrict(_shapeInfo, other->_shapeInfo); } template<typename T> bool NDArray<T>::isEmpty() const { return ArrayOptions::arrayType(this->getShapeInfo()) == ArrayType::EMPTY; } template <typename T> bool NDArray<T>::operator ==(const NDArray<T> &other) const { if (!this->isSameShape(&other)) return false; return this->equalsTo(&other); } } #endif
GB_binop__pair_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): generated for the PAIR binary operator on int8 — z = 1
// regardless of x and y, which is why all GETA/GETB macros below are no-ops.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_int8)
// A.*B function (eWiseMult):       GB (_AemultB)
// NOTE(review): the line above does not match the emitted symbol, which is
// GB (_AemultB_01__pair_int8) below — generator comment/template mismatch.
// A.*B function (eWiseMult):       GB (_AemultB_02__pair_int8)
// A.*B function (eWiseMult):       GB (_AemultB_03__pair_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pair_int8)
// A*D function (colscale):         GB (_AxD__pair_int8)
// D*A function (rowscale):         GB (_DxB__pair_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_int8)
// C=scalar+B                       GB ((none))
// C=scalar+B'                     GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                     GB ((none))

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = 1

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// no-op: the PAIR operator never reads aij
#define GB_GETA(aij,Ax,pA) \
    ;

// bij = Bx [pB]
// no-op: the PAIR operator never reads bij
#define GB_GETB(bij,Bx,pB) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_INT8 || GxB_NO_PAIR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (PAIR is none of those, so this kernel is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns; harmless
    // generator artifact, left as-is because this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__pair_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pair_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ;                      \
    ;                      \
    Cx [pC] = 1 ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ;                      \
    ;                      \
    Cx [pC] = 1 ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
softmax-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file softmax-inl.h * \brief */ #ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #define MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #include <algorithm> #include <string> #include <utility> #include <vector> #include <type_traits> #include "../mxnet_op.h" #include "../operator_common.h" #include "../tensor/broadcast_reduce_op.h" #include "../../common/cuda/utils.h" using mshadow::red::limits::MinValue; namespace mxnet { namespace op { namespace mxnet_op { struct softmax_fwd { template<typename AType> MSHADOW_XINLINE static AType Map(float a, AType b) { return AType(expf(a)/b); } template<typename AType> MSHADOW_XINLINE static AType Map(double a, AType b) { return AType(exp(a)/b); } }; struct log_softmax_fwd { template<typename DType> MSHADOW_XINLINE static float Map(DType a, float b) { return a - logf(b); } template<typename DType> MSHADOW_XINLINE static double Map(DType a, double b) { return a - log(b); } }; template<typename OP, bool negate, typename AType, typename DType, typename OType, typename IType, int ndim> inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; if 
(M == 0) return; index_t N = shape.Size()/M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; if (length == nullptr) { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < M; ++j) { val = negate ? -in[base + j*sa] : in[base + j*sa]; if (mmax < val) mmax = val; } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp((in_val - mmax)/temperature); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum); } } } } else { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t len = static_cast<index_t>(length[i]); index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < len; ++j) { val = negate ? -in[base + j*sa] : in[base + j*sa]; if (mmax < val) mmax = val; } for (index_t j = len; j < M; ++j) { out[base + j*sa] = OType(0.0f); } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < len; ++j) { in_val = negate ? 
-in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; sum += std::exp((in_val - mmax)/temperature); } for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j*sa] : in[base + j*sa]; out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum); } } } } } struct masked_softmax_where { template<typename DType, int ndim> MSHADOW_XINLINE static void Map(index_t id, DType* out, const bool* cond, const DType* x, const double y, Shape<ndim> data_shape, Shape<ndim> mask_shape) { index_t mask_pos = 0; index_t stride = 1; for (index_t i = ndim-1, j = id; i >=0; --i) { auto tmp = j / data_shape[i]; if (mask_shape[i] != 1) { mask_pos += (j - tmp * mask_shape[i]) * stride; } stride *= mask_shape[i]; j = tmp; } KERNEL_ASSIGN(out[id], kWriteTo, (cond[mask_pos] ? x[id] : static_cast<DType>(y))); } }; template<typename OP, bool masked_neg_inf, bool negate, typename AType, typename DType, int ndim> inline void MaskedSoftmax(Stream<cpu> *s, DType *in, DType *out, bool *mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, bool normalize, const OpContext& ctx) { Tensor<cpu, 1, DType> workspace = ctx.requested[0].get_space_typed<cpu, 1, DType>( Shape1(data_shape.Size()), s); DType* masked_input = TBlob(workspace).dptr<DType>(); double neg = MinValue<DType>(); Kernel<masked_softmax_where, cpu>::Launch(s, data_shape.Size(), masked_input, mask, in, neg, data_shape, mask_shape); int* max_lenghts = nullptr; double masked_value = 0.0; if (masked_neg_inf) masked_value = -INFINITY; Softmax<OP, negate, AType, DType>(s, masked_input, out, max_lenghts, data_shape, axis, temperature); Kernel<masked_softmax_where, cpu>::Launch(s, data_shape.Size(), out, mask, out, masked_value, data_shape, mask_shape); } struct softmax_bwd { template<typename DType, typename AType> MSHADOW_XINLINE static AType Map(DType ograd, DType out, 
                                   AType sum) {
    return AType(out * (ograd - sum));
  }
};

// Backward op for log_softmax: grad = ograd - exp(out) * sum(ograd).
// Separate float/double overloads select expf vs exp.
struct log_softmax_bwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
    return AType(ograd - expf(out)*sum);
  }

  template<typename AType>
  MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
    return AType(ograd - exp(out)*sum);
  }
};

// CPU softmax/log_softmax gradient along `axis`.
// OP1 combines (ograd, out) into the per-row reduction `sum`; OP2 produces
// the final gradient from (ograd, out, sum). When `length` is non-null only
// the first length[i] entries of each row contribute to the reduction and
// the remaining gradient entries are zeroed.
template<typename OP1, typename OP2, int Req, bool negate, typename AType,
         typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape,
                        int axis, const DType temperature) {
  index_t M = shape[axis];      // reduction length along the softmax axis
  if (M == 0) return;
  index_t N = shape.Size()/M;   // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];

  if (length != nullptr) {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      index_t len = static_cast<index_t>(length[i]);
      AType sum = AType(0);
      for (index_t j = 0; j < len; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  } else {
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      AType sum = AType(0);
      for (index_t j = 0; j < M; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  }
}

// CPU masked softmax gradient: zero the masked positions of ograd, run the
// regular SoftmaxGrad, then zero the masked positions of the result.
template<typename OP1, typename OP2, int Req, bool negate, typename AType,
         int ndim, typename DType>
inline void MaskedSoftmaxGrad(Stream<cpu> *s, DType *out, DType *ograd,
                              DType *igrad, bool *mask,
                              Shape<ndim> data_shape, Shape<ndim> mask_shape,
                              int axis, const double temperature,
                              const OpContext& ctx) {
  Tensor<cpu, 1, DType> workspace = ctx.requested[0].get_space_typed<cpu, 1, DType>(
    Shape1(data_shape.Size()), s);
  DType* masked_ograd = TBlob(workspace).dptr<DType>();
  Kernel<masked_softmax_where, cpu>::Launch(s, data_shape.Size(), masked_ograd,
                                            mask, ograd, 0.0, data_shape, mask_shape);
  int* max_lenghts = nullptr;  // no length-based masking in this path
  SoftmaxGrad<OP1, OP2, Req, negate, AType, DType, DType, int, ndim>(
    s, out, masked_ograd, igrad, max_lenghts, data_shape, axis, temperature);
  Kernel<masked_softmax_where, cpu>::Launch(s, data_shape.Size(), igrad, mask,
                                            igrad, 0.0, data_shape, mask_shape);
}

#ifdef __CUDACC__
// Generic (strided) softmax kernel: one CUDA block of 2^x_bits threads
// per softmax row.
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void
softmax_compute_kernel(DType *in, OType *out, IType *length, index_t M,
                       int axis, Shape<ndim> sshape, Shape<ndim> stride,
                       const double temperature) {
  const unsigned x_size = 1 << x_bits;   // threads per block
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);

  // Block-wide max over the (possibly length-limited) row.
  red::maximum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::maximum, x_bits>(smem);
  __syncthreads();
  DType smax = smem[0];
  __syncthreads();

  // Block-wide sum of exp((x - max) / temperature).
  red::sum::SetInitValue(smem[x]);
  DType val;
  for (index_t i = x; i < len; i += x_size) {
    val = negate ? -in[base + i*sa]:in[base + i*sa];
    smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();

  // Positions past `len` are written as 0.
  for (index_t i = x; i < M; i += x_size) {
    val = negate ? -in[base + i*sa] : in[base + i*sa];
    out[base + i*sa] = (i < len) ?
                       OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
                       OType(0.0f);
  }
}

const int softmax_threads_per_block = 512;

// Optimized kernel for contiguous rows (stride[axis] == 1): each row is
// staged in static shared memory using wide LType loads, and several rows
// may share a single block (rows_per_block).
template<typename OP, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
                                               const index_t M, const double temperature,
                                               const int rows_per_block,
                                               const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* in_aligned = reinterpret_cast<const LType*>(in);
  size_t base = my_row * row_length;
  // Stage this row into shared memory with vectorized loads.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
  }
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
  __syncthreads();

  // Per-thread max, then shared-memory tree reduction + final warp reduce
  // over the threads that own this row.
  DType my_max_value;
  red::maximum::SetInitValue(my_max_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
  }
  scratch[threadIdx.x] = my_max_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return ::max(x, y); });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // Partial sums of exp((x - max)/temperature), reduced the same way.
  AType my_sum;
  red::sum::SetInitValue(my_sum);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  scratch[threadIdx.x] = my_sum;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] += scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return x + y;});
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // Write results back into the shared-memory row; entries past len become 0.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    row[i] = (i < len) ?
             DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
             DType(0.0f);
  }
  __syncthreads();
  LType* out_aligned = reinterpret_cast<LType*>(out);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
  }
}

// Maps the flat row index `idx` (over sshape, i.e. the data shape with the
// softmax axis collapsed to 1) to the matching flat offset in mask_shape,
// honouring broadcast (size-1) mask dimensions. On return *stride_axis
// holds the mask stride along the softmax axis (0-able via broadcast).
template<int ndim>
MSHADOW_XINLINE index_t get_mask_position(const index_t idx,
                                          const Shape<ndim>& data_shape,
                                          const Shape<ndim>& mask_shape,
                                          int axis, index_t* stride_axis) {
  index_t ret = 0;
  index_t stride = 1;
  *stride_axis = 1;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / data_shape[i];
    if (i != axis && mask_shape[i] != 1) {
      ret += (j - tmp * mask_shape[i]) * stride;
      if (i > axis) *stride_axis *= mask_shape[i];
    }
    stride *= mask_shape[i];
    j = tmp;
  }
  return ret;
}

// Generic (strided) masked softmax kernel; one block per row. When
// normalize is false the max-subtraction step is skipped (smax stays 0).
template<bool normalize, int x_bits, typename OP, bool masked_neg_inf,
         bool negate, typename AType, int ndim, typename DType>
__global__ void masked_softmax_kernel(DType *in, DType *out, bool *in_mask,
                                      index_t M, int axis, Shape<ndim> sshape,
                                      Shape<ndim> stride, Shape<ndim> mask_shape,
                                      const double temperature) {
  extern __shared__ double shared[];
  AType* smem = reinterpret_cast<AType*>(shared);  // x_size
  const unsigned x_size = 1 << x_bits;
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t sa_mask = 0;
  index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape,
                                        axis, &sa_mask);
  bool bcst_mask_axis = (mask_shape[axis] == 1);  // mask broadcast along softmax axis
  index_t x = threadIdx.x;

  // Max over the unmasked entries (only when normalize).
  DType smax = 0.0;
  if (normalize) {
    red::maximum::SetInitValue(smem[x]);
    for (index_t i = x; i < M; i += x_size) {
      bool mask_value = bcst_mask_axis ? in_mask[base_mask] :
                                         in_mask[base_mask + i*sa_mask];
      if (mask_value) smem[x] = ::max(smem[x], negate ?
-in[base + i*sa] : in[base + i*sa]); } __syncthreads(); cuda::Reduce1D<red::maximum, x_bits>(smem); __syncthreads(); smax = smem[0]; __syncthreads(); } red::sum::SetInitValue(smem[x]); DType val; for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i*sa_mask]; if (mask_value) { val = (negate ? -in[base + i*sa]:in[base + i*sa]); smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature))); } } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); double masked_value = 0.0; if (masked_neg_inf) masked_value = -INFINITY; for (index_t i = x; i < M; i += x_size) { val = (negate ? -in[base + i*sa] : in[base + i*sa]); bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i*sa_mask]; out[base + i*sa] = mask_value ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : DType(masked_value); } } template<bool normalize, typename OP, bool masked_neg_inf, bool negate, typename AType, typename LType, typename LTypeMask, typename DType, int ndim> __global__ void masked_softmax_stride1_kernel(const DType *in, DType *out, bool *in_mask, const index_t M, int axis, Shape<ndim> sshape, Shape<ndim> mask_shape, const double temperature, const int rows_per_block, const index_t total_rows, const size_t size_input_shared, const size_t size_mask_shared) { const int entries_per_load = sizeof(LType)/sizeof(DType); const int entries_per_load_mask = sizeof(LTypeMask)/sizeof(bool); const int row_length = entries_per_load > 0 ? M / entries_per_load : 0; const int row_length_mask = entries_per_load > 0 ? 
                             M / entries_per_load_mask : 0;
  extern __shared__ double shared[];
  LType* persistent_storage = reinterpret_cast<LType*>(shared);  // rows_per_block * M (DType), aligned to double
  LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]);  // rows_per_block * M (bool), aligned to double
  AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]);  // softmax_threads_per_block
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  size_t base = my_row * row_length;

  // Flat offset of this row's mask, honouring broadcast (size-1) mask
  // dimensions for all axes before the softmax axis.
  index_t pos_mask = 0;
  index_t stride = mask_shape[axis];
  #pragma unroll
  for (index_t i = axis-1, j = my_row; i >=0; --i) {
    auto tmp = j / sshape[i];
    if (mask_shape[i] != 1) {
      pos_mask += (j - tmp * mask_shape[i]) * stride;
      stride *= mask_shape[i];
    }
    j = tmp;
  }

  // Stage the data row and the mask row in shared memory.
  const LType* in_aligned = reinterpret_cast<const LType*>(in);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
  }
  const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]);
  for (index_t i = my_id; i < row_length_mask; i += threads_per_row) {
    mask_shared[my_local_row * row_length_mask + i] = (mask_shape[axis] > 1) ?
                                                      in_mask_aligned[i] :
                                                      in_mask_aligned[0];
  }
  DType* row = reinterpret_cast<DType*>(persistent_storage + my_local_row * row_length);
  bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask);
  __syncthreads();

  // Max over unmasked entries (skipped when normalize == false).
  DType smax = 0.0;
  if (normalize) {
    DType my_max_value;
    red::maximum::SetInitValue(my_max_value);
    for (index_t i = my_id; i < M; i += threads_per_row) {
      if (row_mask[i])
        my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
    }
    scratch[threadIdx.x] = my_max_value;
    __syncthreads();
    for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
      if (my_id < size) {
        scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
      }
      __syncthreads();
    }
    if (my_id < warp_size) {
      AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                                 [](AType x, AType y) { return ::max(x, y); });
      scratch[threadIdx.x] = my_value;
    }
    __syncthreads();
    smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
    __syncthreads();
  }

  // Sum of exp over unmasked entries, reduced across the row's threads.
  AType my_sum;
  red::sum::SetInitValue(my_sum);
  for (index_t i = my_id; i < M; i += threads_per_row) {
    if (row_mask[i]) {
      const DType val = (negate ? -row[i] : row[i]);
      my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
    }
  }
  scratch[threadIdx.x] = my_sum;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] += scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return x + y;});
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // Masked positions get 0 (or -inf when masked_neg_inf).
  double masked_value = 0.0;
  if (masked_neg_inf) masked_value = -INFINITY;
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = (negate ? -row[i] : row[i]);
    row[i] = row_mask[i] ?
             DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
             DType(masked_value);
  }
  __syncthreads();
  LType* out_aligned = reinterpret_cast<LType*>(out);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
  }
}

// GPU softmax launcher: uses the vectorized stride-1 kernel when the
// softmax axis is contiguous, the row fits the 20 kB shared-memory budget
// and DType == OType; otherwise falls back to the generic strided kernel.
template<typename OP, bool negate, typename AType, typename DType,
         typename OType, typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;
  index_t M = shape[axis];
  if (M == 0 || shape.Size() == 0) return;
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M * sizeof(DType) / sizeof(LType),
                                                                   softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_compute_kernel<OP, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
  } else {
    softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        in, out, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
  }
}

// GPU masked softmax launcher; mirrors Softmax's kernel selection.
template<typename OP, bool masked_neg_inf, bool negate, typename AType,
         typename DType, typename OType, int ndim>
inline void
MaskedSoftmax(Stream<gpu> *s, DType *in, DType *out, bool *mask,
              Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis,
              const double temperature, bool normalize,
              const OpContext& ctx) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;
  index_t M = data_shape[axis];
  if (M == 0 || data_shape.Size() == 0) return;
  index_t N = data_shape.Size() / M;
  Shape<ndim> stride = calc_stride(data_shape);
  Shape<ndim> sshape = data_shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using max of 20 kB of shared memory for InputData in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      CHECK_LE(sizeof(DType), sizeof(LType));
      MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, {
        CHECK_LE(sizeof(bool), sizeof(LTypeMask));
        int rows_per_block = mxnet::common::cuda::
                             get_rows_per_block(M * sizeof(DType) / sizeof(LType),
                                                softmax_threads_per_block);
        // calculate amount shared memory (slots aligned to double)
        // FIX: dropped the accidental duplicated assignment
        // (`int entries_per_load = entries_per_load = ...`), which read the
        // variable inside its own initializer.
        int entries_per_load = sizeof(LType)/sizeof(DType);
        int entries_per_load_mask = sizeof(LTypeMask)/sizeof(bool);
        size_t size_input_shared = entries_per_load > 0 ?
                                   rows_per_block * M / entries_per_load : 0;
        size_t size_mask_shared = entries_per_load_mask > 0 ?
                                  rows_per_block * M / entries_per_load_mask : 0;
        // Round each shared-memory region up to whole doubles so the
        // reinterpret_casts in the kernel stay aligned.
        size_input_shared = ((size_input_shared * sizeof(LType) +
                              sizeof(double) - 1) / sizeof(double));
        size_mask_shared = ((size_mask_shared * sizeof(LTypeMask) +
                             sizeof(double) - 1) / sizeof(double));
        size_t amount_shared = size_input_shared * sizeof(double) +
                               size_mask_shared * sizeof(double) +
                               softmax_threads_per_block * sizeof(AType);
        int nblocks = (N + rows_per_block - 1) / rows_per_block;
        if (normalize) {
          masked_softmax_stride1_kernel<true, OP, masked_neg_inf, negate,
                                        AType, LType, LTypeMask>
            <<<nblocks, softmax_threads_per_block, amount_shared,
               mshadow::Stream<gpu>::GetStream(s)>>>(
              in, out, mask, M, axis, sshape, mask_shape,
              temperature, rows_per_block, N,
              size_input_shared, size_mask_shared);
        } else {
          masked_softmax_stride1_kernel<false, OP, masked_neg_inf, negate,
                                        AType, LType, LTypeMask>
            <<<nblocks, softmax_threads_per_block, amount_shared,
               mshadow::Stream<gpu>::GetStream(s)>>>(
              in, out, mask, M, axis, sshape, mask_shape,
              temperature, rows_per_block, N,
              size_input_shared, size_mask_shared);
        }
      });
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_kernel);
  } else {
    size_t amount_shared = x_size * sizeof(AType);
    if (normalize) {
      masked_softmax_kernel<true, x_bits, OP, masked_neg_inf, negate, AType, ndim>
        <<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, mask, M, axis, sshape, stride, mask_shape, temperature);
    } else {
      masked_softmax_kernel<false, x_bits, OP, masked_neg_inf, negate, AType, ndim>
        <<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, mask, M, axis, sshape, stride, mask_shape, temperature);
    }
    MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_kernel);
  }
}

// Optimized softmax gradient for contiguous rows: both `out` and `ograd`
// rows are staged side by side in shared memory (hence 2x row_length per
// row: row[i] = out, row[i + M] = ograd).
template<typename OP1, typename OP2, int Req, bool negate, typename AType,
         typename LType, typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
                                            DType *igrad, const IType *length,
                                            const index_t M, const double temperature,
                                            const int rows_per_block,
                                            const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* out_aligned = reinterpret_cast<const LType*>(out);
  const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
  size_t base = my_row * row_length;
  // Stage out and ograd rows back to back in shared memory.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
    persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
  }
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
  __syncthreads();

  // Row reduction sum(OP1(ograd, out)) over the first `len` entries.
  AType my_sum_value;
  red::sum::SetInitValue(my_sum_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_sum_value += OP1::Map(row[i + M], row[i]);
  }
  scratch[threadIdx.x] = my_sum_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return x + y; });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // Final gradient; entries past len become 0.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = negate ?
                      -OP2::Map(row[i + M], row[i], ssum) :
                      OP2::Map(row[i + M], row[i], ssum);
    row[i] = (i < len) ?
final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature)); } } template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType, typename LTypeMask, typename DType, typename OType, int ndim> __global__ void masked_softmax_stride1_grad_kernel(const OType *out, const OType *ograd, DType *igrad, const bool *in_mask, const index_t M, int axis, Shape<ndim> sshape, Shape<ndim> mask_shape, const double temperature, const int rows_per_block, const index_t total_rows, const size_t size_input_shared, const size_t size_mask_shared) { const int entries_per_load = sizeof(LType)/sizeof(DType); const int entries_per_load_mask = sizeof(LTypeMask)/sizeof(bool); const int row_length = entries_per_load > 0 ? M / entries_per_load : 0; const int row_length_mask = entries_per_load > 0 ? M / entries_per_load_mask : 0; extern __shared__ double shared[]; LType* persistent_storage = reinterpret_cast<LType*>(shared); // 2 * rows_per_block * M (DType), aligned to double LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]); // rows_per_block * M (bool), aligned to double AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]); // softmax_threads_per_block const int warp_size = 32; const int threads_per_row = softmax_threads_per_block / rows_per_block; const int my_local_row = threadIdx.x / threads_per_row; const int my_row = blockIdx.x * rows_per_block + my_local_row; if (my_row >= total_rows) return; const int my_id = threadIdx.x % threads_per_row; size_t base = my_row * row_length; index_t pos_mask = 0; index_t stride = mask_shape[axis]; #pragma unroll for (index_t i = axis - 1, j = my_row; i >=0; --i) { auto tmp = j / sshape[i]; if (mask_shape[i] != 1) { pos_mask += (j - tmp * mask_shape[i]) * stride; stride *= mask_shape[i]; } j = tmp; } const LType* out_aligned = reinterpret_cast<const LType*>(out); const LType* ograd_aligned = 
    reinterpret_cast<const LType*>(ograd);
  // Stage out and ograd rows back to back, then the mask row.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
    persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
  }
  const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]);
  for (index_t i = my_id; i < row_length_mask; i += threads_per_row) {
    mask_shared[my_local_row * row_length_mask + i] = (mask_shape[axis] > 1) ?
                                                      in_mask_aligned[i] :
                                                      in_mask_aligned[0];
  }
  DType* row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
  bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask);
  __syncthreads();

  // sum(OP1(ograd, out)) over the unmasked entries, reduced across the
  // row's threads (tree reduction + final warp reduce).
  AType my_sum_value;
  red::sum::SetInitValue(my_sum_value);
  for (index_t i = my_id; i < M; i += threads_per_row) {
    if (row_mask[i])
      my_sum_value += OP1::Map(row[i + M], row[i]);
  }
  scratch[threadIdx.x] = my_sum_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return x + y; });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();

  // Final gradient; masked positions become 0.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = negate ?
                      -OP2::Map(row[i + M], row[i], ssum):
                      OP2::Map(row[i + M], row[i], ssum);
    row[i] = row_mask[i] ?
             DType(val / static_cast<DType>(temperature)) : DType(0.0f);
    if (Req == kAddTo) {
      row[i] += igrad[my_row * M + i];
    }
  }
  __syncthreads();
  LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
  }
}

// Generic (strided) masked softmax gradient kernel; one block per row.
template<int x_bits, typename OP1, typename OP2, int Req, bool negate,
         typename AType, int ndim, typename DType, typename OType>
__global__ void masked_softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
                                           const bool *in_mask, index_t M, int axis,
                                           Shape<ndim> sshape, Shape<ndim> stride,
                                           Shape<ndim> mask_shape,
                                           const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t sa_mask = 0;
  index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape,
                                        axis, &sa_mask);
  bool bcst_mask_axis = (mask_shape[axis] == 1);  // mask broadcast along softmax axis
  index_t x = threadIdx.x;

  // Block-wide reduction sum(OP1(ograd, out)) over unmasked entries.
  red::sum::SetInitValue(smem[x]);
  for (index_t i = x; i < M; i += x_size) {
    bool mask_value = bcst_mask_axis ? in_mask[base_mask] :
                                       in_mask[base_mask + i*sa_mask];
    if (mask_value)
      smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();

  DType final_result;
  for (index_t i = x; i < M; i += x_size) {
    bool mask_value = bcst_mask_axis ? in_mask[base_mask] :
                                       in_mask[base_mask + i*sa_mask];
    final_result = negate ?
                   -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum):
                   OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum);
    final_result = mask_value ?
                   final_result / static_cast<DType>(temperature) : DType(0.0f);
    KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result);
  }
}

// GPU softmax gradient launcher; mirrors Softmax's kernel selection, but the
// shared-memory budget is halved because both out and ograd must be staged.
template<typename OP1, typename OP2, int Req, bool negate, typename AType,
         int ndim, typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length,
                        Shape<ndim> shape, int axis,
                        const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;
  index_t M = shape[axis];
  if (M == 0 || shape.Size() == 0) return;
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  // Need to store both out and ograd, so M can be only half compared to
  // forward pass.
  const size_t max_opt_M = 20 * 1024 / DSize / 2;
  if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M * sizeof(DType) / sizeof(LType),
                                                                   softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          out, ograd, igrad, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
  } else {
    softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        out, ograd, igrad, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
  }
}

// GPU masked softmax gradient launcher.
template<typename OP1, typename OP2, int Req, bool negate, typename AType,
         int ndim, typename DType, typename OType>
inline void
MaskedSoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd, DType *igrad,
                  bool *mask, Shape<ndim> data_shape, Shape<ndim> mask_shape,
                  int axis, const double temperature, const OpContext& ctx) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;
  index_t M = data_shape[axis];
  if (M == 0 || data_shape.Size() == 0) return;
  index_t N = data_shape.Size() / M;
  Shape<ndim> stride = calc_stride(data_shape);
  Shape<ndim> sshape = data_shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using max of 20 kB of shared memory for InputData in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      CHECK_LE(sizeof(DType), sizeof(LType));
      MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, {
        CHECK_LE(sizeof(bool), sizeof(LTypeMask));
        int rows_per_block = mxnet::common::cuda::
                             get_rows_per_block(M * sizeof(DType) / sizeof(LType),
                                                softmax_threads_per_block);
        // calculate amount shared memory (slots aligned to double)
        // FIX: dropped the accidental duplicated assignment
        // (`int entries_per_load = entries_per_load = ...`), which read the
        // variable inside its own initializer.
        int entries_per_load = sizeof(LType)/sizeof(DType);
        int entries_per_load_mask = sizeof(LTypeMask)/sizeof(bool);
        size_t size_input_shared = entries_per_load > 0 ?
                                   rows_per_block * M / entries_per_load : 0;
        size_t size_mask_shared = entries_per_load_mask > 0 ?
rows_per_block * M / entries_per_load_mask : 0; size_input_shared = ((2 * size_input_shared * sizeof(LType) + sizeof(double) - 1) / sizeof(double)); size_mask_shared = ((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) / sizeof(double)); size_t amount_shared = size_input_shared * sizeof(double) + size_mask_shared * sizeof(double) + softmax_threads_per_block * sizeof(AType); int nblocks = (N + rows_per_block - 1) / rows_per_block; masked_softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType, LTypeMask> <<<nblocks, softmax_threads_per_block, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, mask, M, axis, sshape, mask_shape, temperature, rows_per_block, N, size_input_shared, size_mask_shared); }); }); MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_grad_kernel); } else { masked_softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim> <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, mask, M, axis, sshape, stride, mask_shape, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_grad_kernel); } } #endif } // namespace mxnet_op struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> { int axis; dmlc::optional<double> temperature; dmlc::optional<int> dtype; dmlc::optional<bool> use_length; DMLC_DECLARE_PARAMETER(SoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1) .describe("The axis along which to compute softmax."); DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>()) .describe("Temperature parameter in softmax"); DMLC_DECLARE_FIELD(dtype) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(dmlc::optional<int>()) .describe("DType of the output in case this can't be inferred. 
" "Defaults to the same as input's dtype if not defined (dtype=None)."); DMLC_DECLARE_FIELD(use_length) .set_default(dmlc::optional<bool>(false)) .describe("Whether to use the length input as a mask over the data input."); } bool operator==(const SoftmaxParam& other) const { return this->axis == other.axis && this->temperature == other.temperature && this->dtype == other.dtype && this->use_length == other.use_length; } }; struct MaskedSoftmaxParam : public dmlc::Parameter<MaskedSoftmaxParam> { int axis; dmlc::optional<double> temperature; dmlc::optional<int> dtype; dmlc::optional<bool> normalize; DMLC_DECLARE_PARAMETER(MaskedSoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1) .describe("The axis along which to compute softmax."); DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>()) .describe("Temperature parameter in softmax"); DMLC_DECLARE_FIELD(normalize) .set_default(dmlc::optional<bool>(true)) .describe("Whether to normalize input data x: x = x - max(x)"); } }; static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.dtype.has_value() && param.dtype.value() != -1; } static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.use_length.value(); } static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 
2U : 1U); if (softmax_has_dtype_override(attrs)) { TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value()); type_assign(&(*in_attrs)[0], (*out_attrs)[0]); return true; } else { std::vector<int> tmp = {in_attrs->at(0)}; return ElemwiseType<1, 1>(attrs, &tmp, out_attrs); } } static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(out_attrs->size(), 1U); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U); if (param.use_length.value()) { mxnet::TShape& dshape = in_attrs->at(0); mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1); int j = 0; int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1; for (int i = 0; i < dshape.ndim(); ++i) { if (i != axis) { tmp_shape[j++] = dshape[i]; } } SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape); } mxnet::ShapeVector tmp = {in_attrs->at(0)}; return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs); } static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)}; mxnet::ShapeVector dgrad = {out_attrs->at(0)}; bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad); SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]); SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]); SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]); SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]); mxnet::ShapeVector length = {in_attrs->at(2)}; mxnet::ShapeVector lgrad = {out_attrs->at(1)}; res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad)); SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]); SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]); return res; } else { return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs); } } else { return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs); } } 
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U); if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U); int in_dtype = (*in_attrs)[1]; int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2]; TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype); if (softmax_use_length(attrs)) { TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2)); } return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 && (!softmax_use_length(attrs) || ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1)); } else { CHECK_EQ(in_attrs->size(), 2U); int out_dtype = (*in_attrs)[1]; TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype); return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1; } } static inline std::vector<std::pair<int, int> > SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}}; } else { return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}}; } } else { return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; } } static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { return softmax_use_length(attrs) ? 
4 : 3; } return 2; } static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { return std::vector<std::string>{"ograd", "data", "length", "output"}; } else { return std::vector<std::string>{"ograd", "data", "output"}; } } else { return std::vector<std::string>{"ograd", "output"}; } } struct SoftmaxFGradient { const char *op_name; std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) const { if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) { return ElemwiseGradUseInOut {op_name}(n, ograds); } else { return ElemwiseGradUseOut {op_name}(n, ograds); } } }; static inline bool MaskedSoftmaxOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1); CHECK_EQ(in_attrs->size(), 2U); std::vector<int> tmp = {in_attrs->at(0)}; return ElemwiseType<1, 1>(attrs, &tmp, out_attrs); } static inline bool MaskedSoftmaxOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_shape, mxnet::ShapeVector *out_shape) { CHECK_EQ(out_shape->size(), 1U); CHECK_EQ(in_shape->size(), 2U); mxnet::TShape& data_shape = (*in_shape)[0]; mxnet::TShape& mask_shape = (*in_shape)[1]; if (!mxnet::ndim_is_known(data_shape) || !mxnet::ndim_is_known(mask_shape)) { return false; } CHECK(data_shape.ndim() == mask_shape.ndim()) << "Number of dimensions in data and mask does not match"; CHECK(data_shape.ndim() > 0) << "Empty tuple is not allowed"; for (int i = 0; i < data_shape.ndim(); ++i) { CHECK(data_shape[i] == mask_shape[i] || mask_shape[i] == 1) << "Mask cannot be broadcasted from " << mask_shape << " to " << data_shape; } SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0)); SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0)); return true; } static inline bool MaskedSoftmaxGradOpShape(const nnvm::NodeAttrs& attrs, 
mxnet::ShapeVector *in_shape, mxnet::ShapeVector *out_shape) { CHECK_EQ(out_shape->size(), 1U); CHECK_EQ(in_shape->size(), 3U); mxnet::TShape& ograd_shape = (*in_shape)[0]; mxnet::TShape& mask_shape = (*in_shape)[1]; if (!mxnet::ndim_is_known(ograd_shape) || !mxnet::ndim_is_known(mask_shape)) { return false; } CHECK(ograd_shape.ndim() == mask_shape.ndim()) << "Number of dimensions in data and mask does not match"; CHECK(ograd_shape.ndim() > 0) << "Empty tuple is not allowed"; for (int i = 0; i < ograd_shape.ndim(); ++i) { CHECK(ograd_shape[i] == mask_shape[i] || mask_shape[i] == 1) << "Mask cannot be broadcasted from " << mask_shape << " to " << ograd_shape; } SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0)); SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(2)); SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0)); SHAPE_ASSIGN_CHECK(*in_shape, 2, out_shape->at(0)); return true; } static inline bool MaskedSoftmaxGradOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1U); CHECK_EQ(in_attrs->size(), 3U); int data_dtype = (*in_attrs)[0]; TYPE_ASSIGN_CHECK(*in_attrs, 2, data_dtype); TYPE_ASSIGN_CHECK(*out_attrs, 0, data_dtype); data_dtype = (*out_attrs)[0]; TYPE_ASSIGN_CHECK(*in_attrs, 0, data_dtype); return true; } static inline std::vector<std::pair<int, int> > MaskedSoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) { return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}}; } template<typename xpu, typename OP, bool negate = false> void SoftmaxCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp || inputs[0].Size() == 0U) return; CHECK_NE(req[0], kAddTo); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = 
param.temperature.has_value() ? param.temperature.value() : 1.0; mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true); if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) { common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. " "See https://mxnet.apache.org/api/faq/env_var " "for more details."); } MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, { int type = kInt32; if (param.use_length.value()) { CHECK(inputs.size() > 1) << "Mask needs to be provided when using softmax with use_length=True."; type = inputs[1].type_flag_; } MXNET_INT32_INT64_TYPE_SWITCH(type, IType, { IType* mask_ptr = nullptr; if (param.use_length.value()) { mask_ptr = inputs[1].dptr<IType>(); } if (safe_acc) { if (shape.ndim() == 2) { Softmax<OP, negate, AType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { Softmax<OP, negate, AType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } else { if (shape.ndim() == 2) { Softmax<OP, negate, DType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { Softmax<OP, negate, DType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } }); }); }); } template<typename xpu, typename OP, bool masked_neg_inf, bool negate = false> void MaskedSoftmaxCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp || inputs[0].Size() == 0U) return; 
CHECK_NE(req[0], kAddTo); const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true); if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) { common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for masked_softmax with " "float16 inputs. " "See https://mxnet.apache.org/api/faq/env_var " "for more details."); } MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, { MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, { bool* mask_ptr = inputs[1].dptr<bool>(); if (safe_acc) { MaskedSoftmax<OP, masked_neg_inf, negate, AType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), mask_ptr, inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(), axis, temperature, param.normalize.value(), ctx); } else { MaskedSoftmax<OP, masked_neg_inf, negate, DType>( ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), mask_ptr, inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(), axis, temperature, param.normalize.value(), ctx); } }); }); } template<typename xpu, typename OP1, typename OP2, bool negate = false> void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (softmax_use_length(attrs)) { MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, { if (req[1] != kNullOp) { mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch( ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>()); } }); } if (req[0] == kNullOp) return; const int itype = softmax_use_length(attrs) ? 
inputs[2].type_flag_ : kInt32; const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1; out_idx = softmax_use_length(attrs) ? 3 : out_idx; bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true); MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, { IType * length_ptr = nullptr; if (softmax_use_length(attrs)) { length_ptr = inputs[2].dptr<IType>(); } if (safe_acc) { if (shape.ndim() == 2) { SoftmaxGrad<OP1, OP2, Req, negate, AType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { SoftmaxGrad<OP1, OP2, Req, negate, AType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } else { if (shape.ndim() == 2) { SoftmaxGrad<OP1, OP2, Req, negate, DType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { SoftmaxGrad<OP1, OP2, Req, negate, DType>( ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } }); }); }); }); } template<typename xpu, typename OP1, typename OP2, bool negate = false> void MaskedSoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, 
const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true); MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, { DType* ograd_ptr = inputs[0].dptr<DType>(); DType* out_ptr = inputs[2].dptr<DType>(); bool* mask_ptr = inputs[1].dptr<bool>(); DType* grad_data = outputs[0].dptr<DType>(); if (safe_acc) { MaskedSoftmaxGrad<OP1, OP2, Req, negate, AType>( ctx.get_stream<xpu>(), out_ptr, ograd_ptr, grad_data, mask_ptr, inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(), axis, static_cast<DType>(temperature), ctx); } else { MaskedSoftmaxGrad<OP1, OP2, Req, negate, DType>( ctx.get_stream<xpu>(), out_ptr, ograd_ptr, grad_data, mask_ptr, inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(), axis, static_cast<DType>(temperature), ctx); } }); }); }); } } // namespace op } // namespace mxnet namespace std { template<> struct hash<mxnet::op::SoftmaxParam> { size_t operator()(const mxnet::op::SoftmaxParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axis); ret = dmlc::HashCombine(ret, val.temperature); ret = dmlc::HashCombine(ret, val.dtype); ret = dmlc::HashCombine(ret, val.use_length); return ret; } }; } // namespace std #endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
diagmm_x_coo_n_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"

/*
 * Computes y := alpha * diag(mat) * x + beta * y, where `mat` is a COO
 * matrix whose entries are consumed only when col_indx == row_indx
 * (i.e. the matrix is used as a diagonal matrix), `x` is dense with
 * `columns` columns and leading dimension `ldx`, and `y` has leading
 * dimension `ldy`.
 *
 * Phase 1 splits the nonzeros across threads at row boundaries: each
 * thread records in partition[tid + 1] the index just past the first
 * row boundary it sees in its chunk of the static schedule.
 * Phase 2 lets each thread process its [partition[tid], end) slice,
 * scaling each encountered row of y by beta exactly once before
 * accumulating alpha * value * x into it.
 *
 * NOTE(review): several suspected issues in phase 1 — confirm:
 *   - mat->row_indx[nnz + 1] is read at nnz == _nnz - 1, one element
 *     past the stored nonzeros (out-of-bounds unless the format pads).
 *   - `nnz = _nnz` modifies the OpenMP loop variable inside an
 *     `omp parallel for`, which violates the canonical loop form.
 *   - if a thread's chunk contains no row boundary, partition[tid + 1]
 *     is left uninitialized, and phase 2 reads it.
 * NOTE(review): rows of y after the last row that has a nonzero are
 * never scaled by beta — verify against the intended BLAS semantics.
 * NOTE(review): `or` is a valid identifier in C but an alternative
 * operator token in C++; this file must never be compiled as C++.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_COO *mat,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           const ALPHA_Number beta,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    ALPHA_INT n = columns;
    ALPHA_INT _nnz = mat->nnz;
    ALPHA_INT num_threads = alpha_get_thread_num();
    /* VLA: one slice boundary per thread; partition[0] is the global start. */
    ALPHA_INT partition[num_threads + 1];
    partition[0] = 0;
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT nnz = 0; nnz < _nnz; ++nnz)
    {
        ALPHA_INT tid = alpha_get_thread_id();
        /* First row boundary inside this thread's chunk becomes its
         * successor's slice start (nnz + 1 is the first index of the
         * next row). */
        if (mat->row_indx[nnz] != mat->row_indx[nnz + 1])
        {
            partition[tid + 1] = nnz + 1;
            /* Early exit by forcing the loop counter past the end —
             * see NOTE(review) above about the canonical loop form. */
            nnz = _nnz;
        }
    }
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT start = partition[tid];
        /* Last thread runs to _nnz; others stop at the next slice start. */
        ALPHA_INT end = tid == num_threads - 1 ? _nnz : partition[tid + 1];
        /* `or` tracks the next row of y still awaiting its beta scaling. */
        ALPHA_INT or = mat->row_indx[start];
        for (ALPHA_INT nnz = start; nnz < end; ++nnz)
        {
            ALPHA_INT r = mat->row_indx[nnz];
            ALPHA_Number *Y = &y[index2(r, 0, ldy)];
            /* Scale every row up to and including r by beta exactly once
             * (covers empty rows between nonzeros as well). */
            while (or <= r)
            {
                ALPHA_Number *TY = &y[index2(or, 0, ldy)];
                for (ALPHA_INT c = 0; c < n; c++)
                    alpha_mul(TY[c], TY[c], beta);
                or++;
            }
            /* Only diagonal entries (col == row) contribute to the product. */
            if (mat->col_indx[nnz] == r)
            {
                ALPHA_Number val;
                alpha_mul(val, alpha, mat->values[nnz]);
                const ALPHA_Number *X = &x[index2(mat->col_indx[nnz], 0, ldx)];
                for (ALPHA_INT c = 0; c < n; ++c)
                    alpha_madde(Y[c], val, X[c]);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
LinkedCellParallelOptimizedLock.h
#pragma once

#include <omp.h>

#include "physics/Physics.h"
#include "physics/variants/LennardJones.h"
#include "container/LinkedCell/LinkedCellContainer.h"
#include "LinkedCell.h"

/**
 * This class implements the linked cell algorithm in the form of a parallel algorithm that works with locks and is optimized.
 * @tparam T The physics to be used
 * @tparam dim The dimension of our simulation
 *
 * NOTE(review): the primary template inherits LinkedCellParallelLockFree<LennardJones, dim>
 * (not <T, dim>), and inherits privately while the specialization below inherits publicly —
 * confirm both are intentional.
 */
template<typename T, size_t dim, typename std::enable_if<std::is_base_of<PhysicsType, T>::value, bool>::type = true>
class LinkedCellParallelOptimizedLock : LinkedCellParallelLockFree<LennardJones, dim> {
 public:
  //----------------------------------------Constructor----------------------------------------

  /**
   * Default constructor.
   */
  LinkedCellParallelOptimizedLock() = default;

  //----------------------------------------Methods----------------------------------------

  /**
   * This method calculates the forces between the different particles in the different cells.
   * @param particleContainer that provides possible required values and functionalities
   */
  void performUpdate(ParticleContainer<dim> &particleContainer) const override;

  /**
   * This method calculates the force, position and velocity of the particles in the container.
   * In addition, the structure is updated appropriately and renewed if needed.
   * Particles that leave the structure are deleted.
   * @param particleContainer The ParticleContainer, for whose contents the positions should be calculated
   * @param deltaT time step of our simulation
   * @param gravitation additional vector of gravitational force applied on all particles
   * @param current_time current time of this iteration
   *
   * NOTE(review): here gravitation is a double& while the LennardJones specialization
   * takes Vector<dim>& — confirm the primary template is still in use.
   */
  void calculateNextStep(ParticleContainer<dim> &particleContainer, double deltaT, double &gravitation,
                         double current_time) const override {
    LinkedCell<T, dim>::calculateNextStep(particleContainer, deltaT, gravitation, current_time);
  }
};

/**
 * This class implements the linked cell algorithm in the form of a parallel algorithm that works with locks and is optimized.
 * (Specialization for the LennardJones potential.)
 * @tparam dim The dimension of our simulation
 */
template<size_t dim>
class LinkedCellParallelOptimizedLock<LennardJones, dim> : public LinkedCellParallelLockFree<LennardJones, dim> {
 public:
  //----------------------------------------Constructor----------------------------------------

  /**
   * Default constructor
   */
  LinkedCellParallelOptimizedLock() = default;

  /**
   * Constructor taking the cutoff radius, the cell size and the particle container of the simulation.
   * @param cutoffRadius radius beyond which pair interactions are ignored
   * @param cellSize size of a single linked cell
   * @param particleContainer container holding all particles of the simulation
   */
  LinkedCellParallelOptimizedLock(double cutoffRadius, Vector<dim> cellSize, ParticleContainer<dim> &particleContainer)
      : LinkedCellParallelLockFree<LennardJones, dim>(cutoffRadius, cellSize, particleContainer) {

  };

  //----------------------------------------Methods----------------------------------------

  /**
   * This method calculates the forces between the different particles in the different cells.
   * @param particleContainer that provides possible required values and functionalities
   */
  void performUpdate(ParticleContainer<dim> &particleContainer) const override {
    auto &cellContainer = static_cast<LinkedCellContainer<dim> &>(particleContainer);
    // Apply boundary conditions to all boundary cells in parallel.
#pragma omp parallel for shared(cellContainer) default(none) schedule(static, 8)
    for (size_t i = 0; i < cellContainer.getBoundaryCells().size(); ++i) {
      Boundary<dim> &b = cellContainer.getBoundaryCells()[i];
      b.applyCellProperties();
    }

    // `cells` groups cells so that cells within one vector can be processed
    // concurrently; groups themselves are handled sequentially.
    for (auto &cellVector: LinkedCellParallelLockFree<LennardJones, dim>::cells) {
#pragma omp parallel for shared(cellVector, cellContainer) default(none) schedule(static, 4)
      for (size_t c = 0; c < cellVector.size(); ++c) {
        Cell<dim> *cell = cellVector[c];
        std::vector<Cell<dim> *> &neighbours = cell->getNeighbours();
        std::vector<Particle<dim> *> &cellParticles = cell->getParticles();

        if (!cellParticles.empty()) {
          LinkedCell<LennardJones, dim>::calcBetweenNeighboursAndCell(neighbours, cellParticles, cellContainer);

          LinkedCell<LennardJones, dim>::calcInTheCell(cellParticles, cellContainer);

          std::vector<std::tuple<Cell<dim> *, Vector<dim>>> &periodicNeighbours = cell->getPeriodicNeighbours();

          // Iterate over all periodic neighbours
          for (std::tuple<Cell<dim> *, Vector<dim>> &t: periodicNeighbours) {
            // Get the periodic cell which influences the current cell
            Cell<dim> *periodicCell = std::get<0>(t);

            if (!periodicCell->getParticles().empty()) {
              // Lock the periodic cell while its particles' positions are
              // temporarily shifted; NOTE(review): the nested
              // periodicCell-then-cell lock order must be consistent
              // everywhere to avoid deadlock — verify.
              periodicCell->setLock();
              for (auto j = periodicCell->getParticles().begin(); j != periodicCell->getParticles().end(); ++j) {
                // Update the current position of the Particle(s)
                const Vector<dim> oldPos = (*j)->getX();

                // Shift the particle to its mirrored position next to this cell.
                Vector<dim> pos;
                for (size_t index = 0; index < dim; ++index) {
                  pos[index] = oldPos[index] - periodicCell->getPosition()[index] + std::get<1>(t)[index];
                }

                (*j)->setX(pos);

                cell->setLock();
                for (auto i = cellParticles.begin(); i != cellParticles.end(); ++i) {
                  auto force = LinkedCell<LennardJones, dim>::calculateLennardJones(*(*i), *(*j),
                                                                                   cellContainer);
                  LinkedCell<LennardJones, dim>::updateForceForParticle(*(*i), *(*j), force);
                }
                cell->unsetLock();

                // Restore the original position after the force evaluation.
                (*j)->setX(oldPos);
              }
              periodicCell->unsetLock();
            }
          }
        }
      }
    }
    LinkedCell<LennardJones, dim>::calculateMolecules(cellContainer);
  }

  /**
   * This method calculates the force, position and velocity of the particles in the container.
   * In addition, the structure is updated appropriately and renewed if needed.
   * Particles that leave the structure are deleted.
   * @param particleContainer The ParticleContainer, for whose contents the positions should be calculated
   * @param deltaT time step of our simulation
   * @param gravitation additional vector of gravitational force applied on all particles
   * @param current_time current time of this iteration
   */
  void calculateNextStep(ParticleContainer<dim> &particleContainer, double deltaT, Vector<dim> &gravitation,
                         double current_time) const override {
    LinkedCell<LennardJones, dim>::calculateNextStep(particleContainer, deltaT, gravitation, current_time);
  }
};
timer.h
/***************************************************************************
 *  include/stxxl/bits/common/timer.h
 *
 *  Part of the STXXL. See http://stxxl.sourceforge.net
 *
 *  Copyright (C) 2002, 2005 Roman Dementiev <dementiev@mpi-sb.mpg.de>
 *  Copyright (C) 2007-2009 Andreas Beckmann <beckmann@cs.uni-frankfurt.de>
 *  Copyright (C) 2008 Johannes Singler <singler@ira.uka.de>
 *  Copyright (C) 2013-2014 Timo Bingmann <tb@panthema.net>
 *
 *  Distributed under the Boost Software License, Version 1.0.
 *  (See accompanying file LICENSE_1_0.txt or copy at
 *  http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/

#ifndef STXXL_COMMON_TIMER_HEADER
#define STXXL_COMMON_TIMER_HEADER

#include <stxxl/bits/config.h>
#include <stxxl/bits/namespace.h>
#include <stxxl/bits/verbose.h>
#include <stxxl/bits/common/utils.h>

#if STXXL_BOOST_TIMESTAMP
 #include <boost/date_time/posix_time/posix_time.hpp>
 #include <cmath>
#elif STXXL_WINDOWS
 #ifndef NOMINMAX
  #define NOMINMAX
 #endif
 #include <windows.h>
#else
 #include <ctime>
 #include <sys/time.h>
#endif

STXXL_BEGIN_NAMESPACE

//! \addtogroup support
//! \{

//! Returns number of seconds since the epoch, high resolution.
inline double timestamp()
{
#if STXXL_BOOST_TIMESTAMP
    boost::posix_time::ptime MyTime = boost::posix_time::microsec_clock::local_time();
    boost::posix_time::time_duration Duration =
        MyTime - boost::posix_time::time_from_string("1970-01-01 00:00:00.000");
    double sec = double(Duration.hours()) * 3600. +
                 double(Duration.minutes()) * 60. +
                 double(Duration.seconds()) +
                 double(Duration.fractional_seconds()) / (pow(10., Duration.num_fractional_digits()));
    return sec;
#elif STXXL_WINDOWS
    return GetTickCount64() / 1000.0;
#else
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return double(tp.tv_sec) + double(tp.tv_usec) / 1000000.;
#endif
}

/*!
 * Class timer is a simple stop watch timer. It uses the timestamp() function
 * to get the current time when start() is called. Then, after some processing,
 * the function stop() functions can be called, or seconds() and other
 * accessors can be called directly.
 */
class timer
{
    //! boolean whether the stopwatch timer is currently running
    bool running;

    //! total accumulated time in seconds.
    double accumulated;

    //! last start time of the stopwatch
    double last_clock;

    //! return current timestamp
    static inline double timestamp()
    {
        return stxxl::timestamp();
    }

public:
    //! boolean indicating that this class does real timing
    static const bool is_real = true;

    //! initialize and optionally immediately start the timer
    inline timer(bool start_immediately = false)
        : running(false), accumulated(0), last_clock(0)
    {
        if (start_immediately) start();
    }

    //! start timer
    inline void start()
    {
        running = true;
        last_clock = timestamp();
    }

    //! stop timer
    inline void stop()
    {
        running = false;
        accumulated += timestamp() - last_clock;
    }

    //! reset accumulated time to zero (timer keeps running if started)
    inline void reset()
    {
        accumulated = 0.;
        last_clock = timestamp();
    }

    //! return currently accumulated time in milliseconds
    inline double mseconds() const
    {
        if (running)
            return (accumulated + timestamp() - last_clock) * 1000.;
        return (accumulated * 1000.);
    }

    //! return currently accumulated time in microseconds
    inline double useconds() const
    {
        if (running)
            return (accumulated + timestamp() - last_clock) * 1000000.;
        return (accumulated * 1000000.);
    }

    //! return currently accumulated time in seconds (as double)
    inline double seconds() const
    {
        if (running)
            return (accumulated + timestamp() - last_clock);
        return (accumulated);
    }

    //! accumulate elapsed time from another timer
    inline timer& operator += (const timer& tm)
    {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
        accumulated += tm.seconds();
        return *this;
    }

    //! direct <<-operator for ostream. Can be used for printing with std::cout.
    friend std::ostream& operator << (std::ostream& os, const timer& t)
    {
        return os << t.seconds() << 's';
    }
};

/*!
 * Class fake_timer is a drop-in replacement for timer, which does
 * nothing. Using the fake class, timers can quickly be disabled in release
 * builds, but still be available for debugging sessions.
 *
 * \see timer
 */
class fake_timer
{
public:
    //! boolean indicating that this class does NOT do real timing
    static const bool is_real = false;

    //! initialize and optionally immediately start the timer
    fake_timer(bool = false)
    { }

    //! start timer
    void start()
    { }

    //! stop timer
    void stop()
    { }

    //! reset accumulated time (no-op in the fake timer)
    void reset()
    { }

    //! return currently accumulated time in milliseconds
    double mseconds() const
    {
        return std::numeric_limits<double>::quiet_NaN();
    }

    //! return currently accumulated time in microseconds
    double useconds() const
    {
        return std::numeric_limits<double>::quiet_NaN();
    }

    //! return currently accumulated time in seconds (as double)
    double seconds() const
    {
        return std::numeric_limits<double>::quiet_NaN();
    }

    //! accumulate elapsed time from another timer
    inline fake_timer& operator += (const fake_timer&)
    {
        return *this;
    }

    //! direct <<-operator for ostream. Can be used for printing with std::cout.
    friend std::ostream& operator << (std::ostream& os, const fake_timer& t)
    {
        return os << t.seconds() << 's';
    }
};

/*!
 * Simple scoped timer, which takes a text message and prints the duration
 * until the scope is destroyed.
 */
class scoped_print_timer
{
protected:
    //! message
    std::string m_message;

    //! bytes processed
    uint64 m_bytes;

    //! timer
    stxxl::timer m_timer;

public:
    //! save message and start timer
    scoped_print_timer(const std::string& message, const uint64 bytes = 0)
        : m_message(message),
          m_bytes(bytes),
          m_timer(true)
    {
        STXXL_MSG("Starting " << message);
    }

    //! on destruction: tell the time
    ~scoped_print_timer()
    {
        if (m_bytes == 0) {
            STXXL_MSG("Finished "
                      << m_message
                      << " after " << m_timer.seconds() << " seconds");
        }
        else {
            double bps = (double)m_bytes / m_timer.seconds();

            STXXL_MSG("Finished "
                      << m_message
                      << " after " << m_timer.seconds() << " seconds. "
                      << "Processed " << format_IEC_size(m_bytes) << "B"
                      << " @ " << format_IEC_size((uint64)bps) << "B/s");
        }
    }

    //! constant access to enclosed timer
    const stxxl::timer & timer() const
    {
        return m_timer;
    }
};

//! \}

STXXL_END_NAMESPACE

#endif // !STXXL_COMMON_TIMER_HEADER
// vim: et:ts=4:sw=4
create_SNN_graph2_omp.c
// Author: Fabio Rodrigues Pereira
// E-mail: fabior@uio.no

#include <stdlib.h> // rand, malloc, calloc and free.
#include <stdio.h>  // printf
#include <omp.h>

/*
 * Builds the shared-nearest-neighbor (SNN) values of a CSR graph.
 *
 *   N       : number of nodes
 *   row_ptr : CSR row pointer, length N+1
 *   col_idx : CSR column indices, length row_ptr[N]
 *   SNN_val : output; *SNN_val is allocated here with the same length as
 *             col_idx, and entry z holds the number of common entries of
 *             the adjacency lists of edge z's two endpoints.
 *
 * On allocation failure *SNN_val is left NULL and a message is printed.
 */
void create_SNN_graph2_omp(int N, int *row_ptr, int *col_idx, int **SNN_val)
{
    int nnz = row_ptr[N];

    // allocating SNN_val that has the same length of col_idx
    // (explicit cast keeps the file compilable as C++ as well)
    (*SNN_val) = (int *) calloc((size_t) nnz, sizeof **SNN_val);
    if (*SNN_val == NULL)
    {
        fprintf(stderr, "create_SNN_graph2_omp: allocation failed\n");
        return;
    }

    // One independent iteration per edge z.  All per-edge scratch is
    // declared inside the loop body so it is automatically private.
    // NOTE: the previous version declared reduction(+:count), which was
    // wrong — count is per-edge scratch, not a loop-wide sum; the reduced
    // value was never meaningful.  Plain int indices also avoid the old
    // signed/unsigned comparisons between size_t and int.
    int z;
    #pragma omp parallel for
    for (z = 0; z < nnz; z++)
    {
        // find the source row of edge z: row_nr is the r such that
        // row_ptr[r] <= z < row_ptr[r+1] (always found, since z < nnz)
        int row_nr = N - 1;
        for (int x = 1; x <= N; x++)
        {
            if (z < row_ptr[x])
            {
                row_nr = x - 1;
                break;
            }
        }

        // count matching entries between the adjacency list of row_nr and
        // the adjacency list of the edge's destination node col_idx[z]
        int count = 0;
        for (int i = row_ptr[row_nr]; i < row_ptr[row_nr + 1]; i++)
        {
            for (int j = row_ptr[col_idx[z]]; j < row_ptr[col_idx[z] + 1]; j++)
            {
                if (col_idx[i] == col_idx[j])
                    count += 1;
            }
        }
        (*SNN_val)[z] = count;
    }
}
errorAbs.h
#pragma once #include <cmath> #include <array> #include <vector> #include <algorithm> #include "_cuda.h" #include "ceilDiv.h" #include "sum.h" using namespace std; // Finds absolute error between 2 vectors (arrays). template <class T> T errorAbs(T *x, T *y, int N) { T a = T(); for (int i=0; i<N; i++) a += abs(x[i] - y[i]); return a; } template <class T, size_t N> T errorAbs(array<T, N>& x, array<T, N>& y) { return errorAbs(x.data(), y.data(), x.size()); } template <class T> T errorAbs(vector<T>& x, vector<T>& y) { return errorAbs(x.data(), y.data(), x.size()); } template <class T> T errorAbsOmp(T *x, T *y, int N) { T a = T(); #pragma omp parallel for reduction (+:a) for (int i=0; i<N; i++) a += abs(x[i] - y[i]); return a; } template <class T, size_t N> T errorAbsOmp(array<T, N>& x, array<T, N>& y) { return errorAbsOmp(x.data(), y.data(), x.size()); } template <class T> T errorAbsOmp(vector<T>& x, vector<T>& y) { return errorAbsOmp(x.data(), y.data(), x.size()); } template <class T> __device__ T errorAbsKernelLoop(T *x, T *y, int N, int i, int DI) { T a = T(); for (; i<N; i+=DI) a += abs(x[i] - y[i]); return a; } template <class T> __global__ void errorAbsKernel(T *a, T *x, T *y, int N) { DEFINE(t, b, B, G); __shared__ T cache[_THREADS]; cache[t] = errorAbsKernelLoop(x, y, N, B*b+t, G*B); sumKernelReduce(cache, B, t); if (t == 0) a[b] = cache[0]; } template <class T> T errorAbsCuda(T *x, T *y, int N) { int threads = _THREADS; int blocks = max(ceilDiv(N, threads), 1024); size_t X1 = N * sizeof(T); size_t A1 = blocks * sizeof(T); T *aPartial = (T*) malloc(A1); T *xD, *yD, *aPartialD; TRY( cudaMalloc(&xD, X1) ); TRY( cudaMalloc(&yD, X1) ); TRY( cudaMalloc(&aPartialD, A1) ); TRY( cudaMemcpy(xD, x, X1, cudaMemcpyHostToDevice) ); TRY( cudaMemcpy(yD, y, X1, cudaMemcpyHostToDevice) ); errorAbsKernel<<<blocks, threads>>>(aPartialD, xD, yD, N); TRY( cudaMemcpy(aPartial, aPartialD, A1, cudaMemcpyDeviceToHost) ); TRY( cudaFree(yD) ); TRY( cudaFree(xD) ); TRY( 
cudaFree(aPartialD) ); return sum(aPartial, blocks); } template <class T, size_t N> T errorAbsCuda(array<T, N>& x, array<T, N>& y) { return errorAbsCuda(x.data(), y.data(), N); } template <class T> T errorAbsCuda(vector<T>& x, vector<T>& y) { return errorAbsCuda(x.data(), y.data(), x.size()); }
loop_meu.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Array length.  Kept as the original factor product, but parenthesized so
 * the macro expands safely inside any expression (e.g. x * TAM). */
#define TAM (64 * 127 * 5)
/* Number of full passes over the array. */
#define ITERACOES_TESTE 100000

/*
 * Micro-benchmark: two OpenMP threads each increment one half of `vetor`,
 * manually unrolled 5 elements per step, ITERACOES_TESTE times.  TAM and
 * TAM/2 are both multiples of 5, so every element is incremented exactly
 * once per pass; the printed sum should equal (long)ITERACOES_TESTE * TAM.
 * Returns 0 on success, -1 if the array cannot be allocated.
 */
int main()
{
    long soma;
    /* cast keeps this compilable as C++ too; check result before use */
    int *vetor = (int *) calloc(TAM, sizeof(int));
    if (vetor == NULL)
    {
        printf("Falha ao alocar memória");
        return -1;
    }

    for (int contador = 0; contador < ITERACOES_TESTE; contador++)
    {
#pragma omp parallel num_threads(2)
        {
            /* thread 0: first half, 5 elements per step */
            if (omp_get_thread_num() == 0)
            {
                for (int i = 0; i < TAM / 2 - 4; i += 5)
                {
                    vetor[i]++;
                    vetor[i + 1]++;
                    vetor[i + 2]++;
                    vetor[i + 3]++;
                    vetor[i + 4]++;
                }
            }
            else if (omp_get_thread_num() == 1)
            {
                /* thread 1: second half, 5 elements per step */
                for (int i = TAM / 2; i < TAM - 4; i += 5)
                {
                    vetor[i]++;
                    vetor[i + 1]++;
                    vetor[i + 2]++;
                    vetor[i + 3]++;
                    vetor[i + 4]++;
                }
            }
        }
    }

    /* serial checksum of all increments */
    soma = 0;
    for (int i = 0; i < TAM; i++)
    {
        soma += vetor[i];
    }
    printf("%ld\n", soma);
    free(vetor);
    return 0;
}
slag2d.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/clag2z.c, mixed zc -> ds, Fri Sep 28 17:38:17 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"

/***************************************************************************//**
 *
 * @ingroup plasma_lag2
 *
 *  Converts m-by-n matrix As from single to double precision (real
 *  variant; the "complex" wording in the generated original was an
 *  artifact of the zc -> ds precision translation).
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix As. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix As. n >= 0.
 *
 * @param[in] pAs
 *          The ldas-by-n matrix As in single precision.
 *
 * @param[in] ldas
 *          The leading dimension of the array As. ldas >= max(1,m).
 *
 * @param[out] pA
 *          On exit, the lda-by-n matrix A in double precision.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_slag2d
 * @sa plasma_dlag2s
 * @sa plasma_slag2d
 *
 ******************************************************************************/
int plasma_slag2d(int m, int n,
                  float *pAs, int ldas,
                  double *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments (negative return value = index of bad argument).
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (ldas < imax(1, m)) {
        plasma_error("illegal value of ldas");
        return -4;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -6;
    }

    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t As;
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &As);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&As);
        return retval;
    }

    // Initialize sequence.
    // NOTE(review): retval of the two init calls below is assigned but never
    // checked — presumably these cannot fail here; confirm against the
    // plasma_sequence_init/plasma_request_init contracts.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block: all calls inside enqueue OpenMP tasks; the master
    // thread submits, the team executes, and the implicit barrier at the end
    // of the parallel region synchronizes everything.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pAs, ldas, As, &sequence, &request);
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_slag2d(As, A, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(As, pAs, ldas, &sequence, &request);
        plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&As);
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_lag2
 *
 *  Converts m-by-n matrix A from single complex to double complex precision.
 *  Non-blocking tile version of plasma_slag2d().  May return before the
 *  computation is finished.  Operates on matrices stored by tiles.
 All matrices
 *  are passed through descriptors.  All dimensions are taken from the
 *  descriptors.  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] As
 *          Descriptor of matrix As.
 *
 * @param[out] A
 *          Descriptor of matrix A.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_slag2d
 * @sa plasma_omp_dlag2s
 * @sa plasma_omp_slag2d
 *
 ******************************************************************************/
void plasma_omp_slag2d(plasma_desc_t As, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(As) != PlasmaSuccess) {
        plasma_error("invalid As");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): when sequence is NULL, the NULL sequence itself is
    // passed into plasma_request_fail below — verify plasma_request_fail
    // tolerates a NULL sequence, otherwise this error path dereferences it.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(As.m, As.n) == 0)
        return;

    // Call the parallel function.
    plasma_pslag2d(As, A, sequence, request);
}
GB_binop__rminus_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint8) // A*D function (colscale): GB (_AxD__rminus_uint8) // D*A function (rowscale): GB (_DxB__rminus_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint8) // C=scalar+B GB (_bind1st__rminus_uint8) // C=scalar+B' GB (_bind1st_tran__rminus_uint8) // C=A+scalar GB (_bind2nd__rminus_uint8) // C=A'+scalar GB (_bind2nd_tran__rminus_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT8 || GxB_NO_RMINUS_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rminus_uint8) ( 
GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_bug5.c
/******************************************************************************
 * FILE: omp_bug5.c
 * DESCRIPTION:
 *   Using SECTIONS, two threads initialize their own array and then add
 *   it to the other's array, however a deadlock occurs.
 *   NOTE: this file is a deliberate teaching example — the deadlock IS the
 *   lesson, so it must not be "fixed" here.  The two sections acquire the
 *   locks in opposite order (locka->lockb vs. lockb->locka); once each
 *   section holds its first lock, each blocks forever waiting for the
 *   other's — a classic lock-order-inversion deadlock.
 * AUTHOR: Blaise Barney  01/29/04
 * LAST REVISED: 08/15/11
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1000000
#define PI 3.1415926535
#define DELTA .01415926535

int main (int argc, char *argv[])
{
int nthreads, tid, i;
/* NOTE(review): two float[1000000] arrays = ~8 MB of automatic storage;
 * may overflow the default stack limit on some systems — confirm ulimit. */
float a[N], b[N];
omp_lock_t locka, lockb;

/* Initialize the locks */
omp_init_lock(&locka);
omp_init_lock(&lockb);

/* Initialize the arrays */
for (i=0; i<N; i++)
  {
  a[i]=0;
  b[i]=0;
  }

/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid, i)
  {
  /* Obtain thread number and number of threads */
  tid = omp_get_thread_num();
  #pragma omp master
    {
    nthreads = omp_get_num_threads();
    printf("Number of threads = %d\n", nthreads);
    }
  printf("Thread %d starting...\n", tid);
  #pragma omp barrier

  #pragma omp sections nowait
    {
    #pragma omp section
      {
      /* takes locka FIRST, then lockb — opposite of the other section */
      omp_set_lock(&locka);
      printf("Thread %d updating a[]\n",tid);
      for (i=0; i<N; i++)
        a[i] += DELTA * i;
      omp_set_lock(&lockb);   /* <-- deadlocks if the other section holds lockb */
      printf("Thread %d updating b[]\n",tid);
      for (i=0; i<N; i++)
        b[i] += DELTA + i;
      omp_unset_lock(&lockb);
      omp_unset_lock(&locka);
      }

    #pragma omp section
      {
      /* takes lockb FIRST, then locka — the inverted acquisition order */
      omp_set_lock(&lockb);
      printf("Thread %d updating b[]\n",tid);
      for (i=0; i<N; i++)
        b[i] += PI * i;
      omp_set_lock(&locka);   /* <-- deadlocks if the other section holds locka */
      printf("Thread %d adding b[] to a[]\n",tid);
      for (i=0; i<N; i++)
        a[i] += PI + i;
      omp_unset_lock(&locka);
      omp_unset_lock(&lockb);
      }
    }  /* end of sections */
  }  /* end of parallel region */

printf("Sample results: %f %f %f %f\n",a[0],b[0],a[999999],b[999999]);

}
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType,ExceptionInfo *); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* Typedef declarations */ struct _ProfileInfo { char *name; size_t length; unsigned char *info; size_t signature; }; typedef struct _CMSExceptionInfo { Image *image; ExceptionInfo *exception; } CMSExceptionInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      /* replace any existing profiles wholesale with a deep clone;
         NOTE(review): MagickTrue is returned even if CloneSplayTree()
         returns NULL — confirm whether an allocation failure here should
         instead report MagickFalse. */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* keep the 8BIM meta-profile consistent before removing the named entry */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%  Returns NULL if the image has no profiles or no profile of that name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.  Returns
%  NULL when the profiles have been exhausted (or none exist).
%
%  The format of the GetNextImageProfile method is:
%
%      char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.  (The original comment documented a nonexistent
%      "hash_info" parameter.)
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise added or applied.  Use a name of '*' and a profile
%  of NULL to remove all profiles from the image.
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profile are applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,const MagickBooleanType clone)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)

/*
  One endpoint (source or target) of an LCMS color transform: its
  colorspace, LCMS pixel-format code, channel count, open profile handle,
  rendering intent, the affine map (scale/translate) between ImageMagick's
  normalized pixel values and LCMS's expected range, and one scratch pixel
  buffer per worker thread.
*/
typedef struct _LCMSInfo
{
  ColorspaceType
    colorspace;

  cmsUInt32Number
    type;

  size_t
    channels;

  cmsHPROFILE
    profile;

  int
    intent;

  double
    scale,
    translate;

  void
    **magick_restrict pixels;
} LCMSInfo;

#if LCMS_VERSION < 2060
/*
  lcms < 2.6 has no context API; these shims emulate it by smuggling the
  user-data pointer through the cmsContext handle itself.
*/
static void* cmsGetContextUserData(cmsContext ContextID)
{
  return(ContextID);
}

static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
  magick_unreferenced(Plugin);
  return((cmsContext) UserData);
}

static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
  cmsLogErrorHandlerFunction Fn)
{
  magick_unreferenced(ContextID);
  cmsSetLogErrorHandler(Fn);
}

static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  magick_unreferenced(ContextID);
}
#endif

/*
  Release the per-thread pixel scratch buffers and the holding array.
  NOTE(review): iterates over the *current* thread resource limit — assumes
  the limit has not shrunk since AcquirePixelThreadSet() ran; confirm.
*/
static void **DestroyPixelThreadSet(void **pixels)
{
  register ssize_t
    i;

  if (pixels == (void **) NULL)
    return((void **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (void *) NULL)
      pixels[i]=RelinquishMagickMemory(pixels[i]);
  pixels=(void **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one row-sized pixel scratch buffer per worker thread; 'highres'
  selects double-precision elements over native Quantum-sized ones.
*/
static void **AcquirePixelThreadSet(const size_t columns,
  const size_t channels,MagickBooleanType
highres)
{
  register ssize_t
    i;

  size_t
    number_threads;

  size_t
    size;

  void
    **pixels;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (void **) NULL)
    return((void **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  /* Element size: full double precision, or native Quantum when lowres. */
  size=sizeof(double);
  if (highres == MagickFalse)
    size=sizeof(Quantum);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=AcquireQuantumMemory(columns,channels*size);
    if (pixels[i] == (void *) NULL)
      return(DestroyPixelThreadSet(pixels));  /* unwind partial allocation */
  }
  return(pixels);
}

/*
  Close every per-thread LCMS transform and free the holding array.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

/*
  Build one LCMS transform per worker thread (LCMS transforms are not
  shareable across threads) from the source/target endpoint descriptions.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
  const LCMSInfo *target_info,const cmsUInt32Number flags,
  cmsContext cms_context)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile,
      source_info->type,target_info->profile,target_info->type,
      target_info->intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));  /* unwind on failure */
  }
  return(transform);
}

/*
  LCMS error callback: route the lcms diagnostic into the ImageMagick
  exception attached to the cmsContext user data.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *)
cmsGetContextUserData(context);
  /* Without a context, exception, or image we have nowhere to report to. */
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ? message : "no message");
  (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
    "UnableToTransformColorspace","`%s', %s (#%u)",image->filename,
    message != (char *) NULL ? message : "no message",severity);
}

/*
  Run one image row through the LCMS transform using the double-precision
  scratch buffers: pack the row into the source buffer (rescaled from
  Quantum range into the source profile's expected range), transform, then
  unpack the target buffer back into the pixel row.  'id' selects the
  calling thread's buffers and transform.
*/
static void TransformDoublePixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
/* Map a Quantum into the source profile's numeric range. */
#define GetLCMSPixel(source_info,pixel) \
  (source_info->scale*QuantumScale*(pixel)+source_info->translate)
/* Map a transformed value back into Quantum range, clamped. */
#define SetLCMSPixel(target_info,pixel) \
  ClampToQuantum(target_info->scale*QuantumRange*(pixel)+target_info->translate)

  register double
    *p;

  register ssize_t
    x;

  p=(double *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetLCMSPixel(source_info,GetPixelRed(image,q));
    if (source_info->channels > 1)
      {
        *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q));
        *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q));
      }
    if (source_info->channels > 3)
      *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q));
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],
    target_info->pixels[id],(unsigned int) image->columns);
  p=(double *) target_info->pixels[id];
  /* Rewind q to the start of the row before writing the results back. */
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,SetLCMSPixel(target_info,*p),q);
    else
      SetPixelRed(image,SetLCMSPixel(target_info,*p),q);
    p++;
    if (target_info->channels > 1)
      {
SetPixelGreen(image,SetLCMSPixel(target_info,*p),q);
        p++;
        SetPixelBlue(image,SetLCMSPixel(target_info,*p),q);
        p++;
      }
    if (target_info->channels > 3)
      {
        SetPixelBlack(image,SetLCMSPixel(target_info,*p),q);
        p++;
      }
    q+=GetPixelChannels(image);
  }
}

/*
  Same row transform as TransformDoublePixels() but with native Quantum
  scratch buffers: no rescaling is applied, LCMS works directly on the
  Quantum-typed values.  'id' selects the calling thread's buffers and
  transform.
*/
static void TransformQuantumPixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
  register Quantum
    *p;

  register ssize_t
    x;

  p=(Quantum *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetPixelRed(image,q);
    if (source_info->channels > 1)
      {
        *p++=GetPixelGreen(image,q);
        *p++=GetPixelBlue(image,q);
      }
    if (source_info->channels > 3)
      *p++=GetPixelBlack(image,q);
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],
    target_info->pixels[id],(unsigned int) image->columns);
  p=(Quantum *) target_info->pixels[id];
  /* Rewind q to the start of the row before writing the results back. */
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,*p++,q);
    else
      SetPixelRed(image,*p++,q);
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,*p++,q);
        SetPixelBlue(image,*p++,q);
      }
    if (target_info->channels > 3)
      SetPixelBlack(image,*p++,q);
    q+=GetPixelChannels(image);
  }
}
#endif

/*
  Attach a built-in sRGB (IEC 61966-2.1) ICC profile to the image under the
  "icc" key, unless the image already carries an ICC profile.  Returns the
  status of SetImageProfile(), or MagickFalse if a profile already exists.
*/
static MagickBooleanType SetsRGBImageProfile(Image *image,
  ExceptionInfo *exception)
{
  /* Embedded copy of Graeme W. Gill's public-domain sRGB profile. */
  static unsigned char
    sRGBProfile[] =
    {
      0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
      0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
      0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
      0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
      0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
      0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 
0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 
0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 
0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 
0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 
0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 
0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 
0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #ifndef TYPE_XYZ_8 #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1)) #endif #define ThrowProfileException(severity,tag,context) \ { \ if (profile != (StringInfo *) NULL) \ profile=DestroyStringInfo(profile); \ if (cms_context != (cmsContext) NULL) \ cmsDeleteContext(cms_context); \ if (source_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_info.profile); \ if (target_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_info.profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) 
LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. */ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile,exception); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace",exception); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image,exception); value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image,exception); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsContext cms_context; CMSExceptionInfo cms_exception; LCMSInfo source_info, target_info; /* Transform pixel colors as defined by the color profiles. 
*/ cms_exception.image=image; cms_exception.exception=exception; cms_context=cmsCreateContext(NULL,&cms_exception); if (cms_context == (cmsContext) NULL) ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler); source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_info.profile == (cmsHPROFILE) NULL) { cmsDeleteContext(cms_context); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; cmsColorSpaceSignature signature; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags; #if !defined(MAGICKCORE_HDRI_SUPPORT) const char *artifact; #endif MagickBooleanType highres; MagickOffsetType progress; ssize_t y; target_info.profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_info.profile=source_info.profile; source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(icc_profile), (cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_info.profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } highres=MagickTrue; #if !defined(MAGICKCORE_HDRI_SUPPORT) artifact=GetImageArtifact(image,"profile:highres-transform"); if (IsStringFalse(artifact) != MagickFalse) highres=MagickFalse; #endif if (highres != MagickFalse) { source_info.scale=1.0; source_info.translate=0.0; } source_info.colorspace=sRGBColorspace; source_info.channels=3; switch (cmsGetColorSpace(source_info.profile)) { case cmsSigCmykData: { source_info.colorspace=CMYKColorspace; source_info.channels=4; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_CMYK_8; else 
#elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_CMYK_16; else #endif { source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; source_info.scale=100.0; } break; } case cmsSigGrayData: { source_info.colorspace=GRAYColorspace; source_info.channels=1; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_16; else #endif source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; break; } case cmsSigLabData: { source_info.colorspace=LabColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_Lab_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_Lab_16; else #endif { source_info.type=(cmsUInt32Number) TYPE_Lab_DBL; source_info.scale=100.0; source_info.translate=(-0.5); } break; } case cmsSigRgbData: { source_info.colorspace=sRGBColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_16; else #endif source_info.type=(cmsUInt32Number) TYPE_RGB_DBL; break; } case cmsSigXYZData: { source_info.colorspace=XYZColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_16; else #endif source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } signature=cmsGetPCS(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_info.profile); if (highres != MagickFalse) 
{ target_info.scale=1.0; target_info.translate=0.0; } target_info.channels=3; switch (signature) { case cmsSigCmykData: { target_info.colorspace=CMYKColorspace; target_info.channels=4; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_CMYK_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_CMYK_16; else #endif { target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; target_info.scale=0.01; } break; } case cmsSigGrayData: { target_info.colorspace=GRAYColorspace; target_info.channels=1; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_16; else #endif target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; break; } case cmsSigLabData: { target_info.colorspace=LabColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_Lab_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_Lab_16; else #endif { target_info.type=(cmsUInt32Number) TYPE_Lab_DBL; target_info.scale=0.01; target_info.translate=0.5; } break; } case cmsSigRgbData: { target_info.colorspace=sRGBColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_16; else #endif target_info.type=(cmsUInt32Number) TYPE_RGB_DBL; break; } case cmsSigXYZData: { target_info.colorspace=XYZColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_XYZ_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_16; else #endif 
target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: { target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC; break; } case PerceptualIntent: { target_info.intent=INTENT_PERCEPTUAL; break; } case RelativeIntent: { target_info.intent=INTENT_RELATIVE_COLORIMETRIC; break; } case SaturationIntent: { target_info.intent=INTENT_SATURATION; break; } default: { target_info.intent=INTENT_PERCEPTUAL; break; } } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(&source_info,&target_info, flags,cms_context); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. */ source_info.pixels=AcquirePixelThreadSet(image->columns, source_info.channels,highres); target_info.pixels=AcquirePixelThreadSet(image->columns, target_info.channels,highres); if ((source_info.pixels == (void **) NULL) || (target_info.pixels == (void **) NULL)) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if (source_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); return(MagickFalse); } if (target_info.colorspace == 
CMYKColorspace) (void) SetImageColorspace(image,target_info.colorspace,exception); progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } if (highres != MagickFalse) TransformDoublePixels(id,image,&source_info,&target_info,transform,q); else TransformQuantumPixels(id,image,&source_info,&target_info,transform,q); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ProfileImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_info.colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait == UndefinedPixelTrait ? TrueColorType : TrueColorAlphaType; break; } case cmsSigCmykData: { image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; break; } case cmsSigGrayData: { image->type=image->alpha_trait == UndefinedPixelTrait ? 
GrayscaleType : GrayscaleAlphaType;
        break;
      }
      default:
        break;
    }
    /* Release per-thread pixel staging buffers and lcms transforms. */
    target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
    source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
    transform=DestroyTransformThreadSet(transform);
    /*
      Device-link profiles describe a one-shot conversion; only non-link
      profiles are attached to the image for later use.
    */
    if ((status != MagickFalse) &&
        (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
      status=SetImageProfile(image,name,profile,exception);
    if (target_info.profile != (cmsHPROFILE) NULL)
      (void) cmsCloseProfile(target_info.profile);
  }
  (void) cmsCloseProfile(source_info.profile);
  cmsDeleteContext(cms_context);
  }
#endif
  }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns its
%  value.
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No profile tree means there is nothing to remove. */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /*
    Passing a NULL profile asks WriteTo8BimProfile() to excise the matching
    resource from the 8BIM wrapper so the two stay in sync.
  */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  /* Caller owns the returned StringInfo (detached from the splay tree). */
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.
Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Rewind the splay-tree cursor only when a profile tree exists. */
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor: frees a stored profile StringInfo. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from an 8BIM resource stream; returns the advanced pointer. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Read a big-endian 32-bit value from an 8BIM resource stream. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/* Read a big-endian 16-bit value from an 8BIM resource stream. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/* Write a big-endian 32-bit value into an 8BIM resource stream. */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  Mirror a named profile (icc/iptc/xmp) into the image's "8bim" wrapper
  profile: replace the matching 8BIM resource with `profile`, or remove it
  when `profile` is NULL.  Any other name is ignored.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* Map the profile name to its Photoshop resource id. */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /* Walk 8BIM resources: "8BIM" + id(2) + Pascal name (padded even) + len(4). */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* q marks the start of the current resource record */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    /* Pascal name is padded so (length byte + name) occupies an even count. */
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    /* Resource payloads are padded to an even byte count. */
    if ((count & 0x01) != 0)
      count++;
    if ((count < 0) || (p > (datum+length-count)) ||
        (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        extract_extent=0;
        extent=(datum+length)-(p+count);  /* bytes after this resource */
        if (profile == (StringInfo *) NULL)
          {
            /* Removal: copy everything before the record header at q. */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* Replacement: keep header, rewrite the 4-byte length, splice. */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;  /* pad payload to even length */
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        /* Append the trailing resources unchanged. */
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}

/*
  Explode an 8BIM resource block into individual image profiles (iptc, icc,
  exif, xmp) and apply the embedded resolution resource (0x03ed) to the image.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
        (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;  /* 16.16 fixed */
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        /* recursive=MagickTrue prevents re-writing back into this 8BIM. */
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Skip the pad byte of odd-length payloads. */
    if ((count & 0x01) != 0)
      p++;
  }
}

#if defined(MAGICKCORE_XML_DELEGATE)
/* Validate an XMP profile by attempting a (quiet) libxml2 parse. */
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    return(MagickFalse);
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
/* Without the XML delegate every XMP profile is treated as invalid. */
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  return(MagickFalse);
}
#endif

/*
  Attach a profile to the image under a lower-cased key.  `recursive` breaks
  the mutual recursion with GetProfilesFromResourceBlock()/WriteTo8BimProfile().
  Invalid XMP is dropped with a warning yet still reports MagickTrue.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(profile) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "CorruptImageProfile","`%s'",name);
      return(MagickTrue);
    }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MagickPathExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  return(status);
}

MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  return(SetImageProfileInternal(image,name,profile,MagickFalse,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles()
synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Consume one byte from a bounded stream; EOF when exhausted. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/* Decode a 16-bit value with the given endianness (no pointer advance). */
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned short) buffer[1] << 8;
      value|=(unsigned short) buffer[0];
      quantum.unsigned_value=value & 0xffff;
      return(quantum.signed_value);
    }
  value=(unsigned short) buffer[0] << 8;
  value|=(unsigned short) buffer[1];
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}

/* Decode a 32-bit value with the given endianness (no pointer advance). */
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned int) buffer[3] << 24;
      value|=(unsigned int) buffer[2] << 16;
      value|=(unsigned int) buffer[1] << 8;
      value|=(unsigned int) buffer[0];
      quantum.unsigned_value=value & 0xffffffff;
      return(quantum.signed_value);
    }
  value=(unsigned int) buffer[0] << 24;
  value|=(unsigned int) buffer[1] << 16;
  value|=(unsigned int) buffer[2] << 8;
  value|=(unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}

/* Consume a big-endian 32-bit value from a bounded stream; 0 when short. */
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  (*length)-=4;
  *p+=4;
  return(value);
}

/* Consume a big-endian 16-bit value from a bounded stream; 0 when short. */
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  (*length)-=2;
  *p+=2;
  return(value);
}

/* Encode a 32-bit value in place with the given endianness. */
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) memcpy(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) memcpy(p,buffer,4);
}

/* Encode a 16-bit value in place with the given endianness. */
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) memcpy(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) memcpy(p,buffer,2);
}

/*
  Overwrite the resolution resource (0x03ED) inside an 8BIM profile with the
  image's current resolution and units (stored as 16.16 fixed-point ppi).
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* Scan for the "8BIM" signature one byte at a time. */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    count=(ssize_t) ReadProfileByte(&p,&length);  /* Pascal-name length */
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;
    length-=count;
    /* Name field is padded to an even total (length byte + name). */
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}

/*
  Walk the EXIF IFD tree and rewrite the XResolution (0x011a), YResolution
  (0x011b), Orientation (0x0112) and ResolutionUnit (0x0128) tags in place
  from the image properties.  The splay tree guards against IFD loops.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* Skip a leading "Exif\0\0" header to reach the TIFF byte-order mark. */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)      /* "II": little-endian TIFF */
    endian=LSBEndian;
  else
    if (id == 0x4D4D)    /* "MM": big-endian TIFF */
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)  /* TIFF magic 42 */
    return(MagickFalse);
  /*
    This is the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /* Tracks visited entries so cyclic IFD chains terminate. */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* already visited: loop detected */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value stored inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: numerator, with denominator 1 when RATIONAL. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF value is ImageMagick units + 1. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* Descend into the sub-IFD; remember where to resume. */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              /* Chained next-IFD pointer after the entry table. */
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile != (StringInfo *) NULL)
    if (SyncExifProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  return(status);
}

/*
  Rescale the knots of one Photoshop clip-path resource in place.  Knot
  coordinates are 8.24-style fixed point (value*4096*4096) normalized to the
  old image size; they are remapped into new_geometry.
*/
static void UpdateClipPath(unsigned char *blob,size_t length,
  const size_t old_columns,const size_t old_rows,
  const RectangleInfo *new_geometry)
{
  MagickBooleanType
    in_subpath;  /* NOTE(review): written but never read in this function */

  register ssize_t
    i;

  ssize_t
    knot_count,
    selector;

  knot_count=0;
  in_subpath=MagickFalse;
  while (length != 0)
  {
    selector=(ssize_t) ReadProfileMSBShort(&blob,&length);
    switch (selector)
    {
      case 0:
      case 3:
      {
        if (knot_count != 0)
          {
            /* Record appears mid-subpath: skip it. */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Expected subpath length record.
        */
        knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length);
        blob+=22;
        length-=MagickMin(22,(ssize_t) length);
        break;
      }
      case 1:
      case 2:
      case 4:
      case 5:
      {
        if (knot_count == 0)
          {
            /*
              Unexpected subpath knot.
            */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Add sub-path knot
        */
        for (i=0; i < 3; i++)
        {
          double
            x,
            y;

          signed int
            xx,
            yy;

          y=(double) ReadProfileMSBLong(&blob,&length);
          y=y*old_rows/4096/4096;
          y-=new_geometry->y;
          yy=(signed int) ((y*4096*4096)/new_geometry->height);
          WriteProfileLong(MSBEndian,(size_t) yy,blob-4);  /* rewrite in place */
          x=(double) ReadProfileMSBLong(&blob,&length);
          x=x*old_columns/4096/4096;
          x-=new_geometry->x;
          xx=(signed int) ((x*4096*4096)/new_geometry->width);
          WriteProfileLong(MSBEndian,(size_t) xx,blob-4);
        }
        in_subpath=MagickTrue;
        knot_count--;
        /*
          Close the subpath if there are no more knots.
        */
        if (knot_count == 0)
          in_subpath=MagickFalse;
        break;
      }
      case 6:
      case 7:
      case 8:
      default:
      {
        blob+=24;
        length-=MagickMin(24,(ssize_t) length);
        break;
      }
    }
  }
}

/*
  Scan an 8BIM profile for clip-path resources (ids 2000-2998) and rescale
  each with UpdateClipPath().
*/
MagickPrivate void Update8BIMClipPath(const StringInfo *profile,
  const size_t old_columns,const size_t old_rows,
  const RectangleInfo *new_geometry)
{
  unsigned char
    *info;

  size_t
    length;

  ssize_t
    count,
    id;

  assert(profile != (StringInfo *) NULL);
  assert(new_geometry != (RectangleInfo *) NULL);
  length=GetStringInfoLength(profile);
  info=GetStringInfoDatum(profile);
  while (length > 0)
  {
    if (ReadProfileByte(&info,&length) != (unsigned char) '8')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'B')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'I')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'M')
      continue;
    id=(ssize_t) ReadProfileMSBShort(&info,&length);
    count=(ssize_t) ReadProfileByte(&info,&length);  /* Pascal-name length */
    if ((count != 0) && ((size_t) count <= length))
      {
        info+=count;
        length-=count;
      }
    if ((count & 0x01) == 0)
      (void) ReadProfileByte(&info,&length);  /* even-length padding */
    count=(ssize_t) ReadProfileMSBLong(&info,&length);
    if ((count < 0) || ((size_t) count > length))
      {
        length=0;
        continue;
      }
    if ((id > 1999) && (id < 2999))
      UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry);
    info+=count;
    length-=MagickMin(count,(ssize_t) length);
  }
}
GB_binop__times_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__times_int8 // A.*B function (eWiseMult): GB_AemultB__times_int8 // A*D function (colscale): GB_AxD__times_int8 // D*A function (rowscale): GB_DxB__times_int8 // C+=B function (dense accum): GB_Cdense_accumB__times_int8 // C+=b function (dense accum): GB_Cdense_accumb__times_int8 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_int8 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_int8 // C=scalar+B GB_bind1st__times_int8 // C=scalar+B' GB_bind1st_tran__times_int8 // C=A+scalar GB_bind2nd__times_int8 // C=A'+scalar GB_bind2nd_tran__times_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x * y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT8 || GxB_NO_TIMES_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__times_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__times_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__times_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__times_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__times_int8 ( GrB_Matrix C, const 
GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__times_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__times_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__times_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct 
*GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__times_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__times_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ 
Cx [pC] = (x * aij) ; \ } GrB_Info GB_bind1st_tran__times_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB_bind2nd_tran__times_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
histogram_stats.h
#pragma once #include <vector> #include <util/log/log.h> #include <util/timer.h> #include <util/serialization/pretty_print.h> #include <util/util.h> using namespace std; template<typename T> vector<int32_t> core_val_histogram(int n, T &core, bool is_print = false, bool is_print_bin = false) { Timer histogram_timer; // core-value histogram int max_core_val = 0; vector<int32_t> histogram; #pragma omp parallel { #pragma omp for reduction(max:max_core_val) for (auto u = 0; u < n; u++) { max_core_val = max(max_core_val, core[u]); } #pragma omp single { log_info("max value: %d", max_core_val); histogram = vector<int32_t>(max_core_val + 1, 0); } vector<int32_t> local_histogram(histogram.size()); #pragma omp for for (auto u = 0; u < n; u++) { auto core_val = core[u]; local_histogram[core_val]++; } // local_histogram[i] is immutable here. for (auto i = 0u; i < local_histogram.size(); i++) { #pragma omp atomic histogram[i] += local_histogram[i]; } } if (is_print) { if (histogram.size() < 400) { stringstream ss; ss << pretty_print_array(&histogram.front(), histogram.size()); log_info("values histogram: %s", ss.str().c_str()); } else { { stringstream ss; ss << pretty_print_array(&histogram.front(), 100); log_info("first100 values histogram: %s", ss.str().c_str()); } { stringstream ss; ss << pretty_print_array(&histogram.front() + histogram.size() - 100, 100); log_info("last100 values histogram: %s", ss.str().c_str()); } } } log_info("Histogram Time: %.9lf s", histogram_timer.elapsed()); if (is_print_bin) { auto &bins = histogram; auto bin_cnt = 0; int64_t acc = 0; auto thresh = n / 10; auto last = 0; for (auto i = 0u; i < histogram.size(); i++) { if (bins[i] > 0) { bin_cnt++; acc += bins[i]; if (acc > thresh || i == histogram.size() - 1) { log_info("bin[%d - %d]: %s", last, i, FormatWithCommas(acc).c_str()); last = i + 1; acc = 0; } } } log_info("Reversed Bins..."); last = histogram.size() - 1; acc = 0; for (int32_t i = histogram.size() - 1; i > -1; i--) { if (bins[i] > 0) { 
bin_cnt++; acc += bins[i]; if (acc > thresh || i == 0) { log_info("bin[%d - %d]: %s", i, last, FormatWithCommas(acc).c_str()); last = i + 1; acc = 0; } } } log_info("total bin counts: %d", bin_cnt); } return histogram; }
pthreads-libgomp-test.c
/*
 * This file is part of MXE. See LICENSE.md for licensing information.
 */

#include <omp.h>
#include <stdio.h>

/* Smoke test: request four OpenMP threads and have each one announce
 * itself on stderr, verifying that the pthreads-backed libgomp runtime
 * actually spawns and numbers threads. */
int main(int argc, char *argv[])
{
    /* No command-line input is used; silence unused-parameter warnings. */
    (void)argc;
    (void)argv;

    omp_set_num_threads(4);

    #pragma omp parallel
    {
        fprintf(stderr,
                "Hello from thread %d, nthreads %d\n",
                omp_get_thread_num(),
                omp_get_num_threads());
    }

    return 0;
}
GB_binop__bxor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int16) // C=scalar+B GB (_bind1st__bxor_int16) // C=scalar+B' GB (_bind1st_tran__bxor_int16) // C=A+scalar GB (_bind2nd__bxor_int16) // C=A'+scalar GB (_bind2nd_tran__bxor_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_INT16 || GxB_NO_BXOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict 
Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxor_int16) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
VolumetricAveragePooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricAveragePooling.c"
#else

#include <THNN/generic/pooling_shape.h>
#include <algorithm>

/* Validate kernel/stride/pad arguments and tensor shapes for 3D average
 * pooling. `input` is (C,T,H,W) or (N,C,T,H,W); when `gradOutput` is
 * non-NULL its dimensions are checked against the computed output shape.
 * Raises via THArgCheck/THError on any violation. */
static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         int kT, int kW, int kH,
                         int dT, int dW, int dH,
                         int padT, int padW, int padH,
                         bool ceil_mode)
{
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;
  int ndim = input->dim();
  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  if (input->dim() == 5)
  {
    /* batch mode: shift every dimension index past the batch axis */
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
             "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
             kT, kH, kW);
  THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
             "stride should be greater than zero, but got dT: %d dH: %d dW: %d",
             dT, dH, dW);
  THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
                "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");

  THArgCheck(input->size(dimw) >= kW && input->size(dimh) >= kH
             && input->size(dimt) >= kT, 2,
             "input image (T: %d H: %d W: %d) smaller than "
             "kernel size (kT: %d kH: %d kW: %d)",
             input->size(dimt), input->size(dimh), input->size(dimw),
             kT, kH, kW);

  // The second argument is argNumber... here is the index of padH.
  THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11,
             "pad should not be greater than half of kernel size, but got "
             "padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d",
             padT, padW, padH, kT, kW, kH);

  /* sizes */
  nslices = input->size(dimN);
  itime   = input->size(dimt);
  iheight = input->size(dimh);
  iwidth  = input->size(dimw);

  otime   = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode);
  oheight = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode);
  owidth  = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode);

  if (otime < 1 || owidth < 1 || oheight < 1)
    THError("Given input size: (%dx%dx%dx%d). "
            "Calculated output size: (%dx%dx%dx%d). Output size is too small",
            nslices,itime,iheight,iwidth,nslices,otime,oheight,owidth);

  if (gradOutput != NULL)
  {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
  }
}

/* Forward pass over one (possibly batched-out) sample: for each output
 * cell, average the covered input window. Channels are processed in
 * parallel. When count_include_pad, the divisor is the full (padded)
 * window size; otherwise only the in-bounds elements. */
static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
          scalar_t *input_p,
          scalar_t *output_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int padT, int padW, int padH,
          bool count_include_pad)
{
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    int64_t i, j, ti;

    /* local pointers. */
    scalar_t *ip = input_p + k * itime * iwidth * iheight;
    scalar_t *op = output_p + k * otime * owidth * oheight;
    for (i = 0; i < otime * oheight * owidth; ++i)
      *(op + i) = 0;

    /* loop over output */
    for (ti = 0; ti < otime; ti++)
    {
      for (i = 0; i < oheight; i++)
      {
        for (j = 0; j < owidth; j++)
        {
          /* compute pool range (may extend into the padding) */
          int64_t tstart = ti * dT - padT;
          int64_t hstart = i  * dH - padH;
          int64_t wstart = j  * dW - padW;
          int64_t tend = std::min(tstart + kT, itime + padT);
          int64_t hend = std::min(hstart + kH, iheight + padH);
          int64_t wend = std::min(wstart + kW, iwidth + padW);
          /* window size including padding, before clipping */
          int64_t pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
          tstart = std::max(tstart, (int64_t) 0);
          hstart = std::max(hstart, (int64_t) 0);
          wstart = std::max(wstart, (int64_t) 0);
          tend = std::min(tend, itime);
          hend = std::min(hend, iheight);
          wend = std::min(wend, iwidth);

          /* Fix: was `int`, narrowing the int64_t pool_size; now matches
           * updateGradInput_frame which already uses int64_t. */
          int64_t divide_factor;
          if (count_include_pad)
            divide_factor = pool_size;
          else
            divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);

          /* compute local sum: */
          scalar_t sum = 0.0;
          int64_t x, y, z;
          for (z = tstart; z < tend; z++)
          {
            for (y = hstart; y < hend; y++)
            {
              for (x = wstart; x < wend; x++)
              {
                sum += *(ip + z * iwidth * iheight + y * iwidth + x);
              }
            }
          }

          /* set output to local average */
          *op++ += sum / divide_factor;
        }
      }
    }
  }
}

/* Forward entry point: shape-checks, resizes `output`, and dispatches to
 * the per-frame kernel; batch samples run in parallel in 5D mode. */
void THNN_(VolumetricAveragePooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int padT, int padW, int padH,
          bool ceil_mode,
          bool count_include_pad)
{
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;
  scalar_t *input_data;
  scalar_t *output_data;

  THNN_(VolumetricAveragePooling_shapeCheck)(
        state, input, NULL, kT, kW, kH,
        dT, dW, dH, padT, padW, padH, ceil_mode);

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  if (input->dim() == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size(dimN);
  itime   = input->size(dimt);
  iheight = input->size(dimh);
  iwidth  = input->size(dimw);
  otime   = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode);
  oheight = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode);
  owidth  = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode);

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  if (input->dim() == 4) /* non-batch mode */
  {
    /* resize output */
    THTensor_(resize4d)(output, nslices, otime, oheight, owidth);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();

    THNN_(VolumetricAveragePooling_updateOutput_frame)(
      input_data, output_data, nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      kT, kW, kH,
      dT, dW, dH,
      padT, padW, padH,
      count_include_pad
    );
  }
  else  /* batch mode */
  {
    int64_t p;
    int64_t nBatch = input->size(0);

    int64_t istride = nslices * itime * iwidth * iheight;
    int64_t ostride = nslices * otime * owidth * oheight;

    /* resize output */
    THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++)
    {
      THNN_(VolumetricAveragePooling_updateOutput_frame)(
        input_data + p * istride, output_data + p * ostride, nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        kT, kW, kH,
        dT, dW, dH,
        padT, padW, padH,
        count_include_pad
      );
    }
  }

  /* cleanup */
  c10::raw::intrusive_ptr::decref(input);
}

/* Backward pass over one sample: scatter each output gradient, divided by
 * its window's divisor, back over the input window it averaged. */
static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
          scalar_t *gradInput_p,
          scalar_t *gradOutput_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int padT, int padW, int padH,
          bool count_include_pad)
{
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    int64_t i, j, ti;

    /* local pointers */
    scalar_t *ip = gradInput_p + k * itime * iwidth * iheight;
    scalar_t *op = gradOutput_p + k * otime * owidth * oheight;
    for (i = 0; i < itime*iwidth*iheight; i++)
      *(ip + i) = 0;

    /* loop over output */
    for (ti = 0; ti < otime; ti++)
    {
      for (i = 0; i < oheight; i++)
      {
        for (j = 0; j < owidth; j++)
        {
          int64_t tstart = ti * dT - padT;
          int64_t hstart = i  * dH - padH;
          int64_t wstart = j  * dW - padW;
          int64_t tend = std::min(tstart + kT, itime + padT);
          int64_t hend = std::min(hstart + kH, iheight + padH);
          int64_t wend = std::min(wstart + kW, iwidth + padW);
          int64_t pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
          tstart = std::max(tstart, (int64_t) 0);
          hstart = std::max(hstart, (int64_t) 0);
          wstart = std::max(wstart, (int64_t) 0);
          tend = std::min(tend, itime);
          hend = std::min(hend, iheight);
          wend = std::min(wend, iwidth);

          int64_t divide_factor;
          if (count_include_pad)
            divide_factor = pool_size;
          else
            divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);

          /* scatter gradients out to footprint: */
          scalar_t val = *op++;

          int64_t x,y,z;
          for (z = tstart; z < tend; z++)
          {
            for (y = hstart; y < hend; y++)
            {
              for (x = wstart; x < wend; x++)
              {
                *(ip + z * iheight * iwidth + y * iwidth + x) += val / divide_factor;
              }
            }
          }
        }
      }
    }
  }
}

/* Backward entry point: shape-checks (including gradOutput), zeroes and
 * resizes gradInput, and dispatches to the per-frame scatter kernel. */
void THNN_(VolumetricAveragePooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int padT, int padW, int padH,
          bool ceil_mode,
          bool count_include_pad)
{
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;
  scalar_t *gradInput_data;
  scalar_t *gradOutput_data;

  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  THNN_(VolumetricAveragePooling_shapeCheck)(
        state, input, gradOutput, kT, kW, kH,
        dT, dW, dH, padT, padW, padH, ceil_mode);

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size(dimN);
  itime   = input->size(dimt);
  iheight = input->size(dimh);
  iwidth  = input->size(dimw);
  otime   = gradOutput->size(dimt);
  oheight = gradOutput->size(dimh);
  owidth  = gradOutput->size(dimw);

  /* get raw pointers */
  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();

  /* backprop */
  if (input->dim() == 4) /* non-batch mode*/
  {
    THNN_(VolumetricAveragePooling_updateGradInput_frame)(
      gradInput_data, gradOutput_data, nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      kT, kW, kH,
      dT, dW, dH,
      padT, padW, padH,
      count_include_pad
    );
  }
  else /* batch mode */
  {
    int64_t p;
    int64_t nBatch = input->size(0);

    int64_t istride = nslices * itime * iwidth * iheight;
    int64_t ostride = nslices * otime * owidth * oheight;

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++)
    {
      THNN_(VolumetricAveragePooling_updateGradInput_frame)(
        gradInput_data + p * istride, gradOutput_data + p * ostride, nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        kT, kW, kH,
        dT, dW, dH,
        padT, padW, padH,
        count_include_pad
      );
    }
  }

  /* cleanup */
  c10::raw::intrusive_ptr::decref(gradOutput);
}

#endif
zlansy.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"

/***************************************************************************//**
 *
 * @ingroup plasma_lansy
 *
 *  Returns the norm of a symmetric matrix as
 *
 *     zlansy = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
 *              (
 *              ( norm1(A),         NORM = PlasmaOneNorm
 *              (
 *              ( normI(A),         NORM = PlasmaInfNorm
 *              (
 *              ( normF(A),         NORM = PlasmaFrobeniusNorm
 *
 *  where norm1 denotes the one norm of a matrix (maximum column sum),
 *  normI denotes the infinity norm of a matrix (maximum row sum) and
 *  normF denotes the Frobenius norm of a matrix (square root of sum
 *  of squares). Note that max(abs(A(i,j))) is not a consistent matrix
 *  norm.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in] pA
 *          The symmetric matrix A.
 *          If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
 *          contains the upper triangular part of the matrix A, and the strictly
 *          lower triangular part of A is not referenced.
 *          If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
 *          contains the lower triangular part of the matrix A, and the strictly
 *          upper triangular part of A is not referenced.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval double
 *         The specified norm of the symmetric matrix A.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zlansy
 * @sa plasma_clansy
 * @sa plasma_dlansy
 * @sa plasma_slansy
 *
 ******************************************************************************/
double plasma_zlansy(plasma_enum_t norm, plasma_enum_t uplo,
                     int n,
                     plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (n == 0)
        return 0.0;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lansy(plasma, PlasmaComplexDouble, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate workspace; its size depends on the requested norm
    // (see plasma_omp_zlansy() documentation).
    double *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (double*)malloc((size_t)A.mt*A.nt*sizeof(double));
        break;
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        work = (double*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(double));
        break;
    case PlasmaFrobeniusNorm:
        work = (double*)malloc((size_t)2*A.mt*A.nt*sizeof(double));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        // Free the tile descriptor created above; returning without this
        // leaked A on the out-of-memory path.
        plasma_desc_destroy(&A);
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    double value;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_zlansy(norm, uplo, A, work, &value, &sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Return the norm.
    return value;
}

/***************************************************************************//**
 *
 * @ingroup plasma_lansy
 *
 *  Calculates the max, one, infinity or Frobenius norm of a symmetric matrix.
 *  Non-blocking equivalent of plasma_zlansy(). May return before the
 *  computation is finished. Operates on matrices stored by tiles. All matrices
 *  are passed through descriptors. All dimensions are taken from the
 *  descriptors. Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] A
 *          The descriptor of matrix A.
 *
 * @param[out] work
 *          Workspace of size:
 *          - PlasmaMaxNorm: A.mt*A.nt
 *          - PlasmaOneNorm: A.mt*A.n + A.n
 *          - PlasmaInfNorm: A.mt*A.n + A.n
 *          - PlasmaFrobeniusNorm: 2*A.mt*A.nt
 *
 * @param[out] value
 *          The calculated value of the norm requested.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zlansy
 * @sa plasma_omp_clansy
 * @sa plasma_omp_dlansy
 * @sa plasma_omp_slansy
 *
 ******************************************************************************/
void plasma_omp_zlansy(plasma_enum_t norm, plasma_enum_t uplo,
                       plasma_desc_t A, double *work, double *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pzlansy(norm, uplo, A, work, value, sequence, request);
}
GB_unop__identity_int64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int64_fp64
// op(A') function:  GB_unop_tran__identity_int64_fp64

// C type:   int64_t
// A type:   double
// cast:     int64_t cij = GB_cast_to_int64_t ((double) (aij))
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input after the cast)
#define GB_OP(z, x) \
    z = x ;

// casting: double -> int64_t with GraphBLAS typecast rules
#define GB_CAST(z, aij) \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = GB_cast_to_int64_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply over anz entries; Cx and Ax may alias since each
// iteration reads and writes only position p.
GrB_Info GB_unop_apply__identity_int64_fp64
(
    int64_t *Cx,       // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body comes from GB_unop_transpose.c, specialized by
// the GB_* macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_unop_tran__identity_int64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_subassign_15.c
//------------------------------------------------------------------------------
// GB_subassign_15: C(I,J)<!M> += scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 15: C(I,J)<!M> += scalar ; using S

// M:           present
// Mask_comp:   true
// C_replace:   false
// accum:       present
// A:           scalar
// S:           constructed
// C:           not bitmap, but can be full since no zombies are inserted in
//              that case
// M:           not bitmap

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_15
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_BinaryOp accum,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    // S records which entries of the C(I,J) submatrix already exist, so the
    // numeric phase can distinguish "accumulate" from "insert pending tuple".
    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_GET_C ;      // C must not be bitmap
    const int64_t Cnvec = C->nvec ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    GB_GET_MASK ;
    GB_GET_S ;
    GB_GET_ACCUM_SCALAR ;

    //--------------------------------------------------------------------------
    // Method 15: C(I,J)<!M> += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is
    // required.  The sparsity of !M cannot be exploited.

    // Methods 13, 15, 17, and 19 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M> += scalar
            //------------------------------------------------------------------

            // Walk the dense iA range while merging with the sparse S and M
            // lists; iA is always the smallest index in play.
            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // both S (i,j) and A (i,j) present
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_C_S_LOOKUP ;
                            GB_withaccum_C_A_1_scalar ;
                        }
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // phase 1 only counts; insertion happens in phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j) and M(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
            GB_GET_VECTOR_FOR_IXJ (M, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC)<!M> += scalar
            //------------------------------------------------------------------

            // Same merge as phase 1, but now actually inserting the pending
            // tuples counted there.
            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {

                //--------------------------------------------------------------
                // Get the indices at the top of each list.
                //--------------------------------------------------------------

                int64_t iS = (pS < pS_end) ? GBI (Si, pS, Svlen) : INT64_MAX ;
                int64_t iM = (pM < pM_end) ? GBI (Mi, pM, Mvlen) : INT64_MAX ;

                //--------------------------------------------------------------
                // find the smallest index of [iS iA iM] (always iA)
                //--------------------------------------------------------------

                int64_t i = iA ;

                //--------------------------------------------------------------
                // get M(i,j)
                //--------------------------------------------------------------

                bool mij ;
                if (i == iM)
                {
                    // mij = (bool) M [pM]
                    mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    GB_NEXT (M) ;
                }
                else
                {
                    // mij not present, implicitly false
                    ASSERT (i < iM) ;
                    mij = false ;
                }

                // complement the mask entry mij since Mask_comp is true
                mij = !mij ;

                //--------------------------------------------------------------
                // accumulate the entry
                //--------------------------------------------------------------

                if (i == iS)
                {
                    ASSERT (i == iA) ;
                    {
                        // entry already handled in phase 1; just advance S
                        GB_NEXT (S) ;
                    }
                }
                else
                {
                    ASSERT (i == iA) ;
                    {
                        // S (i,j) is not present, A (i,j) is present
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
EPI_fmt_plug.c
/*
 * EPiServer module for john 1.7.2 (and possibly later)
 * Uses hashes/salts found in the tblSID of an EPiServer database installation
 *
 * Created by Johannes Gumbel (johannes [at] iforge.cc)
 *
 * If you have any questions as to how a function incorporates with john,
 * please refer to formats.h of john
 *
 * version 0.1 released on 10 jan 2007
 *
 * See doc/README.format-epi for information on the input file format.
 *
 * Updated Dec, 2014, JimF.  Added OMP, and allowed more than one hash to be
 * processed at once (OMP_SCALE stuff).
 */

#if FMT_EXTERNS_H

extern struct fmt_main fmt_EPI;

#elif FMT_REGISTERS_H

john_register_one(&fmt_EPI);

#else

#include <string.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"

#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE   8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE   32768 // Tuned, K8-dual HT
#endif
#endif // __MIC__
#endif

#include "memdbg.h"

/* ciphertext is "0x" + 60 hex chars + ' ' + "0x" + 40 hex chars */
#define CIPHERTEXT_LENGTH  105
#define PLAINTEXT_LENGTH   125
#define BINARY_LENGTH      20        /* raw SHA-1 digest size */
#define BINARY_ALIGN       sizeof(uint32_t)
#define SALT_LENGTH        30
#define SALT_ALIGN         4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* per-candidate buffers, allocated in init() */
static int (*key_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_LENGTH / 4];
/* currently active salt, set by set_salt() and read by crypt_all() */
static char global_salt[SALT_LENGTH+1];

static struct fmt_tests global_tests[] =
{
	{"0x5F1D84A6DE97E2BEFB637A3CB5318AFEF0750B856CF1836BD1D4470175BE 0x4D5EFDFA143EDF74193076F174AC47CEBF2F417F", "Abc.!23"},
	// new tests from pass_gen.pl
	{"0x4F5233704337716F63526A7066344B52784F7A6363316750516A72335668 0x7346DA02479E55973E052FC9A173A3FEA4644FF8","test1"},
	{"0x76706335715834565A55784662304F3367756350684F634447777A313642 0xDBD3D2764A376673164962E3EE2AE95AB6ED2759","thatsworking"},
	{"0x6F724166466172354A7431316A4842746878434B6632744945574A37524A 0xE1ADE625160BB27C16184795715F1C9EF30C45B0","test3"},
	{NULL}
};

/* Allocate per-candidate buffers (scaled up for OpenMP). */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	key_len = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*key_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
	MEM_FREE(key_len);
}

/*
 * Expects ciphertext of format: 0xHEX*60 0xHEX*40
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	unsigned int len, n;

	if (!ciphertext) return 0;

	len = strnlen(ciphertext, CIPHERTEXT_LENGTH + 1);

	if (len != CIPHERTEXT_LENGTH)
		return 0;

	// check fixed positions: "0x" prefixes and the separating space
	if (ciphertext[0]  != '0' || ciphertext[1]  != 'x' ||
	   ciphertext[62] != ' ' ||
	   ciphertext[63] != '0' || ciphertext[64] != 'x')
		return 0;

	/* atoi16u maps valid (uppercase) hex digits to values; 0x7F = invalid */
	for (n = 2; n < 62 && atoi16u[ARCH_INDEX(ciphertext[n])] != 0x7F; ++n);
	if (n < 62)
		return 0;
	for (n = 65; n < CIPHERTEXT_LENGTH && atoi16u[ARCH_INDEX(ciphertext[n])] != 0x7F; ++n);

	return n == len;
}

/* Decode len bytes of hex (optionally "0x"-prefixed) from src into dst. */
static void _tobin(char* dst, char *src, unsigned int len)
{
	unsigned int n;

	if (src[0] == '0' && src[1] == 'x')
		src += sizeof(char)*2;

	for (n = 0; n < len; ++n)
		dst[n] = atoi16[ARCH_INDEX(src[n*2])]<<4 |
		         atoi16[ARCH_INDEX(src[n*2+1])];
}

static void* get_binary(char *ciphertext)
{
	static ARCH_WORD bin[(BINARY_LENGTH + sizeof(ARCH_WORD) - 1) / sizeof(ARCH_WORD)];

	/* digest starts after "0xHEX*60 0x", i.e. at offset 65 */
	_tobin((char*)bin, (char*)(ciphertext+65), BINARY_LENGTH);

	return bin;
}

static void* get_salt(char *ciphertext)
{
	static ARCH_WORD salt[(SALT_LENGTH + sizeof(ARCH_WORD) - 1) / sizeof(ARCH_WORD)];

	/* salt hex starts right after the leading "0x" */
	_tobin((char*)salt, (char*)(ciphertext+2), sizeof(salt));

	return salt;
}

static void set_salt(void *salt)
{
	memcpy(global_salt, salt, SALT_LENGTH);
}

static void set_key(char *key, int index)
{
	/* +1: the trailing NUL is part of the hashed data for this format */
	key_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key)) + 1;
}

static char* get_key(int index)
{
	return saved_key[index];
}

/* SHA-1(salt || key-with-NUL) for every queued candidate. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i=0;

#ifdef _OPENMP
#pragma omp parallel for private(i) shared(global_salt, saved_key, key_len, crypt_out)
#endif
#if defined (_OPENMP) || MAX_KEYS_PER_CRYPT>1
	for (i = 0; i < count; ++i)
#endif
	{
		SHA_CTX ctx;

		SHA1_Init(&ctx);
		/* NOTE(review): only SALT_LENGTH-1 salt bytes are hashed — this
		   matches the EPiServer scheme as implemented; do not "fix". */
		SHA1_Update(&ctx, (unsigned char*)global_salt, SALT_LENGTH-1);
		SHA1_Update(&ctx, saved_key[i], key_len[i]);
		SHA1_Final((unsigned char*)crypt_out[i], &ctx);
	}
	return count;
}

/* Cheap first-word screen over all computed digests. */
static int cmp_all(void *binary, int count)
{
	int index;
	for (index = 0; index < count; index++)
		if ( ((uint32_t*)binary)[0] == crypt_out[index][0] )
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_LENGTH);
}

/* Full digest already compared in cmp_one; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"

static int salt_hash(void *salt)
{
	return *(uint32_t*)salt & (SALT_HASH_SIZE - 1);
}

// Define john integration
struct fmt_main fmt_EPI =
{
	{ // fmt_params
		"EPI",
		"EPiServer SID",
		"SHA1 32/" ARCH_BITS_STR,
		"", // benchmark comment
		0, // benchmark length
		0,
		PLAINTEXT_LENGTH,
		BINARY_LENGTH,
		BINARY_ALIGN,
		SALT_LENGTH,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ NULL },
		global_tests
	}, { // fmt_methods
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
trsm_x_bsr_n_hi_row.c
#include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>

/*
 * Triangular solve alpha * inv(A) * x = y for an upper-triangular BSR
 * matrix A (non-unit diagonal, row-major blocks), solving each of the
 * `columns` right-hand-side columns independently via backward
 * substitution over block rows.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
		                   const ALPHA_SPMAT_BSR *A,
		                   const ALPHA_Number *x,
		                   const ALPHA_INT columns,
		                   const ALPHA_INT ldx,
		                   ALPHA_Number *y,
		                   const ALPHA_INT ldy)
{
    const ALPHA_INT num_thread = alpha_get_thread_num();
    const ALPHA_INT bs = A->block_size;
    /* one diagonal scalar per row of the matrix (extracted below) */
    ALPHA_Number* diag=(ALPHA_Number*) alpha_malloc(A->rows*bs*sizeof(ALPHA_Number));
    const ALPHA_INT m = A->rows*bs;
    const ALPHA_INT n = A->cols*bs;
    // assert(m==n);
    memset(diag, '\0', m * sizeof(ALPHA_Number));
    const ALPHA_INT bs2 = bs * bs;
    const ALPHA_INT b_rows = m / bs;
    const ALPHA_INT b_cols = n / bs;
    const alphasparse_layout_t block_layout = A->block_layout;
    /* kernel only implements row-major intra-block storage */
    if(block_layout != ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
    {
        printf("layout not consistent!!!\n");
        exit(-1);
    }

    /* Pass 1: pull the diagonal element of every row out of the diagonal
       blocks; b_row*(bs+1) walks the block's main diagonal. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT br = 0 ; br < b_rows; br++){
        for(ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++){
            ALPHA_INT bc = A->col_indx[ai];
            if(bc == br){
                for(ALPHA_INT b_row = 0 ; b_row < bs ; b_row++){
                    diag[index2(br,b_row,bs)] = A->values[ai * bs2 + b_row *(bs + 1)];
                }
            }
        }
    }

    /* Pass 2: backward substitution, one RHS column per parallel task.
       Columns are independent, so no synchronization is needed. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        /* per-thread accumulator for one block row's partial sums */
        ALPHA_Number* temp = (ALPHA_Number*) alpha_malloc(bs*sizeof(ALPHA_Number));
        /* upper triangular: iterate block rows bottom-up */
        for (ALPHA_INT br = b_rows - 1; br >= 0; br--)
        {
            for(ALPHA_INT i = 0 ; i < bs ; i++){
                alpha_setzero(temp[i]);
            }
            ALPHA_INT diagBlock = -1;
            for (ALPHA_INT ai = A->rows_start[br]; ai < A->rows_end[br]; ai++)
            {
                ALPHA_INT bc = A->col_indx[ai];
                if(bc > br)
                    //row-major
                    for(ALPHA_INT row = 0; row < bs; row++)
                    {
                        //all entities belongs to upper triangle
                        ALPHA_INT a0_offset = ai * bs2 + row * bs;
                        for(ALPHA_INT col = 0 ; col < bs ; col++)
                        {
                            /* accumulate A(br,bc) * y(bc) using already
                               solved rows below */
                            ALPHA_INT y_offset = (bc * bs + col) * ldy + out_y_col;
                            ALPHA_INT ele_offset = a0_offset + col;
                            alpha_madde(temp[row], A->values[ ele_offset ] ,y[y_offset]);
                        }
                    }
                //diagonal must be none-zero block
                if( bc==br ){
                    diagBlock = ai;
                }
            }
            if(diagBlock == -1)
            {
                /* a missing diagonal block makes the system singular */
                printf("lhs matrix invalid for trsm!!!\n");
                exit(-1);
            }
            //row-major
            //right-bottom most: solve within the diagonal block, bottom-up
            for(ALPHA_INT row = bs - 1; row >=0 ; row--)
            {
                //upper triangle of block
                for(ALPHA_INT col = row + 1 ; col < bs ; col++){
                    ALPHA_INT y_offset = (br * bs + col) * ldy + out_y_col;
                    alpha_madde(temp[row] ,A->values[ diagBlock * bs2 + row * bs + col] ,y[y_offset]);
                }
                /* y = (alpha*x - accumulated) / diag */
                ALPHA_Number t;
                alpha_setzero(t);
                alpha_mul(t,alpha,x[(br * bs + row) * ldx + out_y_col] );
                alpha_sub(t,t,temp[row]);
                alpha_div(y[(br * bs + row) * ldy + out_y_col],t, diag[row + br * bs]);
            }
        }
        alpha_free(temp);
    }
    alpha_free(diag);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * Note: Y is modified in place while normalizing the carry.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/*
 * Benchmark driver: allocates a double-buffered field A[2][Nz][Ny][Nx] plus
 * a coefficient field roc2, then runs a PLUTO/CLooG-tiled time loop of the
 * 25-point stencil TESTS times, reporting per-run wall time.
 * Usage: prog Nx Ny Nz Nt (each spatial size is padded by 8 ghost cells).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when fewer than
   * 4 (resp. 5) arguments are given; the code proceeds regardless. */
  if (argc > 3)
  {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[t%2] is the read buffer, A[(t+1)%2] the write buffer. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  /* NOTE(review): this first small allocation is overwritten below and leaked. */
  double ***roc2 = (double ***) malloc(sizeof(double**));
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  /* NOTE(review): loops start at 1, so plane/row/column index 0 is never
   * written here, yet the stencil's -4 neighbor of the first interior point
   * (index 4) reads index 0 — confirm whether the generator intends ghost
   * cells to be zero-initialized. */
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point stencil taps (center + 4 shells of 6 neighbors each). */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (A glibc pre-include header comment was pulled in here by the
     * source-to-source tool; elided — it carried no code.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;  /* lb/ub/lb2/ub2 emitted by CLooG but unused here */
    register int lbv, ubv;
/* Start of CLooG code */
/* Time-skewed, tiled loop nest generated by PLUTO/CLooG; t5 is the logical
 * time step, (t6,t7,t8)-4*t5 the physical (z,y,x) indices. Do not hand-edit
 * the bounds — they encode the legality of the tiling. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=2*Nt-2;t1++) {
    lbp=ceild(t1+2,2);
    ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) {
        for (t4=max(max(ceild(t1-508,512),ceild(4*t2-Nz-1011,1024)),ceild(24*t3-Ny-1011,1024));t4<=min(min(min(floord(4*Nt+Nx-9,1024),floord(2*t1+Nx-3,1024)),floord(4*t2+Nx-9,1024)),floord(24*t3+Nx+11,1024));t4++) {
          for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
            for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                lbv=max(1024*t4,4*t5+4);
                ubv=min(1024*t4+1023,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* Leapfrog update: new = 2*cur - old + roc2 * (25-point Laplacian-like sum). */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  /* NOTE(review): A itself and tile_size are not freed (benchmark exits anyway). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  return 0;
}
GB_convert_sparse_to_hyper.c
//------------------------------------------------------------------------------ // GB_convert_sparse_to_hyper: convert a matrix from sparse to hyperspasre //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // On input, the matrix may have shallow A->p content; it is safely removed. // On output, the matrix is always hypersparse (even if out of memory). If the // input matrix is non-hypersparse, it is given new A->p and A->h that are not // shallow. If the input matrix is already hypersparse, nothing is changed // (and in that case A->p and A->h remain shallow on output if shallow on // input). The A->x and A->i content is not changed; it remains in whatever // shallow/non-shallow/iso property that it had on input). // If an out-of-memory condition occurs, all content of the matrix is cleared. // If the input matrix A is hypersparse, bitmap or full, it is unchanged. 
#include "GB.h"

GrB_Info GB_convert_sparse_to_hyper // convert from sparse to hypersparse
(
    GrB_Matrix A,           // matrix to convert to hypersparse
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (A, "A converting to hypersparse", GB0) ;
    int64_t anz = GB_nnz (A) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;

    //--------------------------------------------------------------------------
    // convert A from sparse to hypersparse
    //--------------------------------------------------------------------------

    if (GB_IS_SPARSE (A))
    {

        //----------------------------------------------------------------------
        // determine the number of threads to use
        //----------------------------------------------------------------------

        GBURBLE ("(sparse to hyper) ") ;
        int64_t n = A->vdim ;

        GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
        int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
        // over-decompose into up to 8 tasks per thread for load balance
        int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
        ntasks = GB_IMIN (ntasks, n) ;
        ntasks = GB_IMAX (ntasks, 1) ;

        //----------------------------------------------------------------------
        // count the number of non-empty vectors in A in each slice
        //----------------------------------------------------------------------

        ASSERT (A->nvec == A->plen && A->plen == n) ;

        // save the old A->p before it is replaced below; it is freed at the
        // end only if it was not shallow
        const int64_t *restrict Ap_old = A->p ;
        size_t Ap_old_size = A->p_size ;
        bool Ap_old_shallow = A->p_shallow ;

        GB_WERK_DECLARE (Count, int64_t) ;
        GB_WERK_PUSH (Count, ntasks+1, int64_t) ;
        if (Count == NULL)
        {
            // out of memory
            return (GrB_OUT_OF_MEMORY) ;
        }

        // pass 1: each task counts its non-empty vectors
        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jstart, jend, my_nvec_nonempty = 0 ; ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            for (int64_t j = jstart ; j < jend ; j++)
            {
                // vector j is non-empty iff it owns at least one entry
                if (Ap_old [j] < Ap_old [j+1]) my_nvec_nonempty++ ;
            }
            Count [tid] = my_nvec_nonempty ;
        }

        //----------------------------------------------------------------------
        // compute cumulative sum of Counts and nvec_nonempty
        //----------------------------------------------------------------------

        // after the cumsum, Count [tid] is the position in Ah_new/Ap_new
        // where task tid writes its first non-empty vector
        GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
        int64_t nvec_nonempty = Count [ntasks] ;
        A->nvec_nonempty = nvec_nonempty ;

        //----------------------------------------------------------------------
        // allocate the new A->p and A->h
        //----------------------------------------------------------------------

        int64_t *restrict Ap_new = NULL ; size_t Ap_new_size = 0 ;
        int64_t *restrict Ah_new = NULL ; size_t Ah_new_size = 0 ;
        Ap_new = GB_MALLOC (nvec_nonempty+1, int64_t, &Ap_new_size) ;
        Ah_new = GB_MALLOC (nvec_nonempty  , int64_t, &Ah_new_size) ;
        if (Ap_new == NULL || Ah_new == NULL)
        {
            // out of memory
            GB_WERK_POP (Count, int64_t) ;
            GB_FREE (&Ap_new, Ap_new_size) ;
            GB_FREE (&Ah_new, Ah_new_size) ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // transplant the new A->p and A->h into the matrix
        //----------------------------------------------------------------------

        A->plen = nvec_nonempty ;
        A->nvec = nvec_nonempty ;
        A->p = Ap_new ; A->p_size = Ap_new_size ;
        A->h = Ah_new ; A->h_size = Ah_new_size ;
        A->p_shallow = false ;
        A->h_shallow = false ;

        //----------------------------------------------------------------------
        // construct the new hyperlist in the new A->p and A->h
        //----------------------------------------------------------------------

        // pass 2: each task re-scans its slice of Ap_old and writes its
        // non-empty vectors starting at offset Count [tid]
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jstart, jend, k = Count [tid] ;
            GB_PARTITION (jstart, jend, n, tid, ntasks) ;
            for (int64_t j = jstart ; j < jend ; j++)
            {
                if (Ap_old [j] < Ap_old [j+1])
                {
                    // vector index j is the kth vector in the new Ah
                    Ap_new [k] = Ap_old [j] ;
                    Ah_new [k] = j ;
                    k++ ;
                }
            }
            ASSERT (k == Count [tid+1]) ;
        }

        Ap_new [nvec_nonempty] = anz ;
        A->magic = GB_MAGIC ;

        //----------------------------------------------------------------------
        // free workspace, and free the old A->p unless it's shallow
        //----------------------------------------------------------------------

        GB_WERK_POP (Count, int64_t) ;
        if (!Ap_old_shallow)
        {
            GB_FREE (&Ap_old, Ap_old_size) ;
        }

        //----------------------------------------------------------------------
        // A is now hypersparse
        //----------------------------------------------------------------------

        ASSERT (GB_IS_HYPERSPARSE (A)) ;
    }

    //--------------------------------------------------------------------------
    // A is now in hypersparse form (or left as full or bitmap)
    //--------------------------------------------------------------------------

    ASSERT (anz == GB_nnz (A)) ;
    ASSERT_MATRIX_OK (A, "A conv to hypersparse (or left full/bitmap)", GB0) ;
    ASSERT (!GB_IS_SPARSE (A)) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;
    return (GrB_SUCCESS) ;
}
csr_matvec_oomp.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Matvec functions for hypre_CSRMatrix class.
 *
 *****************************************************************************/

#include "seq_mv.h"
#include "_hypre_utilities.hpp"

#if defined(HYPRE_USING_DEVICE_OPENMP)

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixMatvec
 *--------------------------------------------------------------------------*/

/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end]
 *
 * Device (OpenMP target offload) out-of-place CSR matvec. When `trans` is
 * nonzero the matrix is first converted to CSC via cuSPARSE so the transposed
 * product can be formed. Device pointers are assumed throughout.
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlaceOOMP( HYPRE_Int        trans,
                                     HYPRE_Complex    alpha,
                                     hypre_CSRMatrix *A,
                                     hypre_Vector    *x,
                                     HYPRE_Complex    beta,
                                     hypre_Vector    *b,
                                     hypre_Vector    *y,
                                     HYPRE_Int        offset )
{
   HYPRE_Int      A_nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      A_ncols  = hypre_CSRMatrixNumCols(A);
   HYPRE_Int      A_nnz    = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i      = hypre_CSRMatrixI(A) + offset;
   HYPRE_Int     *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int      y_size   = hypre_VectorSize(y) - offset;
   HYPRE_Complex *x_data   = hypre_VectorData(x);
   HYPRE_Complex *b_data   = hypre_VectorData(b) + offset;
   HYPRE_Complex *y_data   = hypre_VectorData(y) + offset;
   HYPRE_Int      i;

#ifdef HYPRE_USING_CUSPARSE
   cusparseHandle_t   handle = hypre_HandleCusparseHandle(hypre_handle());
   cusparseMatDescr_t descr  = hypre_HandleCusparseMatDescr(hypre_handle());
#endif

   //hypre_CSRMatrixPrefetch(A, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(b, HYPRE_MEMORY_DEVICE);
   //if (b != y)
   //{
   //   hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
   //}

   /* seed y with b so the beta*y term below realizes beta*b (out-of-place) */
   if (b != y)
   {
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, b_data)
      for (i = 0; i < y_size; i++)
      {
         y_data[i] = b_data[i];
      }
   }

   if (x == y)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n");
   }

   // TODO
   if (offset != 0)
   {
      hypre_printf("WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice :: \n");
   }

   hypre_assert(offset == 0);

   if (trans)
   {
      /* build a CSC copy so the transposed matvec can run row-parallel over
       * the columns of A */
      HYPRE_Complex *csc_a = hypre_TAlloc(HYPRE_Complex, A_nnz,     HYPRE_MEMORY_DEVICE);
      HYPRE_Int     *csc_j = hypre_TAlloc(HYPRE_Int,     A_nnz,     HYPRE_MEMORY_DEVICE);
      HYPRE_Int     *csc_i = hypre_TAlloc(HYPRE_Int,     A_ncols+1, HYPRE_MEMORY_DEVICE);

      /* NOTE(review): this cuSPARSE call is outside the HYPRE_USING_CUSPARSE
       * guard, yet the OpenMP fallback below consumes csc_* — confirm this
       * path is only built when cuSPARSE is available. */
      HYPRE_CUSPARSE_CALL( cusparseDcsr2csc(handle, A_nrows, A_ncols, A_nnz,
                                            A->data, A->i, A->j,
                                            csc_a, csc_j, csc_i,
                                            CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO) );
      HYPRE_CUDA_CALL(cudaDeviceSynchronize());

#ifdef HYPRE_USING_CUSPARSE
      HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                          A->num_cols, A->num_rows, A->num_nonzeros,
                                          &alpha, descr,
                                          csc_a, csc_i, csc_j,
                                          x->data, &beta, y->data) );
#else
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(csc_a, csc_i, csc_j, y_data, x_data)
      for (i = 0; i < A_ncols; i++)
      {
         /* dot column i of A (stored as a CSC "row") with x */
         HYPRE_Complex tempx = 0.0;
         HYPRE_Int j;
         for (j = csc_i[i]; j < csc_i[i+1]; j++)
         {
            tempx += csc_a[j] * x_data[csc_j[j]];
         }
         y_data[i] = alpha*tempx + beta*y_data[i];
      }
#endif

      hypre_TFree(csc_a, HYPRE_MEMORY_DEVICE);
      hypre_TFree(csc_i, HYPRE_MEMORY_DEVICE);
      hypre_TFree(csc_j, HYPRE_MEMORY_DEVICE);
   }
   else
   {
#ifdef HYPRE_USING_CUSPARSE
      HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                          A_nrows, A_ncols, A_nnz,
                                          &alpha, descr,
                                          A_data, A_i, A_j,
                                          x_data, &beta, y_data) );
#else
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(A_data, A_i, A_j, y_data, x_data)
      for (i = 0; i < A_nrows; i++)
      {
         /* y(i) = alpha * (row i of A) . x + beta * y(i) */
         HYPRE_Complex tempx = 0.0;
         HYPRE_Int j;
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            tempx += A_data[j] * x_data[A_j[j]];
         }
         y_data[i] = alpha*tempx + beta*y_data[i];
      }
#endif
   }

   /* HYPRE_CUDA_CALL(cudaDeviceSynchronize()); */

   return hypre_error_flag;
}

#endif /* #if defined(HYPRE_USING_DEVICE_OPENMP) */
django_fmt_plug.c
/* Django 1.4 patch for JtR. Hacked together during May of 2012 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Input Format => user:$django$*type*django-hash
 *
 * Where,
 *
 * type => 1, for Django 1.4 pbkdf_sha256 hashes and
 *
 * django-hash => Second column of "SELECT username, password FROM auth_user"
 *
 * July, 2012, the oSSL PKCS5_PBKDF2_HMAC function was replaced with a much faster
 * function pbkdf2() designed by JimF. Originally this function was designed for
 * the mscash2 (DCC2). The same pbkdf2 function, is used, and simply required small
 * changes to use SHA256.
 *
 * This new code is 3x to 4x FASTER than the original oSSL code. Even though it is
 * only useing oSSL functions. A lot of the high level stuff in oSSL sux for speed. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_django;
#elif FMT_REGISTERS_H
john_register_one(&fmt_django);
#else

// uncomment this header to use the slower PKCS5_PBKDF2_HMAC function.
// Note, PKCS5_PBKDF2_HMAC is ONLY available in oSSL 1.00 + (1.0c I think to be exact)
//#include <openssl/evp.h>

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "base64.h"
#include "base64_convert.h"
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               4 // tuned on core i7
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "Django"
#define FORMAT_NAME             ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT       " (x10000)"
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define HASH_LENGTH             44
#define BINARY_SIZE             32
#define SALT_SIZE               sizeof(struct custom_salt)
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define SALT_ALIGN              sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

/* Known hash/plaintext pairs used by JtR's self-test. */
static struct fmt_tests django_tests[] = {
	{"$django$*1*pbkdf2_sha256$10000$qPmFbibfAY06$x/geVEkdZSlJMqvIYJ7G6i5l/6KJ0UpvLUU6cfj83VM=", "openwall"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd7$2nTDwPhSsDKOwpKiV04teVtf+a14Rs7na/lIB3KnHkM=", "123"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd1$bkdQo9RoatRomupPFP+XEo+Guuirq4mi+R1cFcV0U3M=", "openwall"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd6$Uq33DAHOFHUED+32IIqCqm+ITU1mhsGOJ7YwFf6h+6k=", "password"},
	{"$django$*1*pbkdf2_sha256$10000$34L3roCQ6ZfN$R21tJK1sIDfmj9BfBocefFfuGVwE3pXcLEhChNjc+pU=", "0123456789012345678901234567890123456789012345678901234567890123"},
	{"$django$*1*pbkdf2_sha256$10000$7qPqyUDw8kZV$pFmVRjlHvayoWEy8ZWXkHgfmgImUKLmkmruclpYVAxM=", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
	{NULL}
};

// per-candidate plaintext and computed PBKDF2 output
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

static struct custom_salt {
	int type;                  // always 1 (pbkdf2_sha256) after valid()
	int iterations;            // PBKDF2 iteration count from the hash string
	unsigned char salt[32];    // NUL-terminated salt text
} *cur_salt;

/* Allocate per-candidate buffers; scale key slots by OMP_SCALE threads. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(sizeof(*crypt_out),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Syntax check of one candidate ciphertext; returns 1 if parseable. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	if (strncmp(ciphertext, "$django$*", 9) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;;
	ctcopy += 9;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* type */
		goto err;
	/* type must be 1 */
	if (!isdec(p)) goto err;
	if (atoi(p) != 1)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* algorithm */
		goto err;
	if (strcmp(p, "pbkdf2_sha256") != 0)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p)) // FIXME: what about iterations == 0?
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* salt */
		goto err;
	if (strlen(p) > sizeof(cur_salt->salt)-1)
		goto err;
	if ((p = strtokm(NULL, "")) == NULL)	/* hash */
		goto err;
	if (strlen(p)-1 != base64_valid_length(p,e_b64_mime,flg_Base64_MIME_TRAIL_EQ) || strlen(p)-1 > HASH_LENGTH-1) {
		goto err;
	}
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse type/iterations/salt out of the ciphertext into a static salt. */
static void *get_salt(char *ciphertext)
{
	char Buf[120], *ctcopy=Buf;
	char *p, *t;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	strncpy(Buf, ciphertext, 119);
	Buf[119] = 0;
	ctcopy += 9;	/* skip over "$django$*" */
	p = strtokm(ctcopy, "*");
	cs.type = atoi(p);
	strtokm(NULL, "$");
	t = strtokm(NULL, "$");
	cs.iterations = atoi(t);
	t = strtokm(NULL, "$");
	strcpy((char*)cs.salt, t);

	return (void *)&cs;
}

/* Decode the trailing base64 field into the 32-byte binary digest. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	p = strrchr(ciphertext, '$') + 1;
	base64_decode(p, strlen(p), (char*)out);
	return out;
}

/* Partial-hash lookups over the first 32 bits of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Compute PBKDF2-HMAC-SHA256 for all queued candidates.
 * Without _OPENMP the braces form a plain block run once at index 0
 * (count is then bounded by the unscaled keys-per-crypt). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		union {
			ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = crypt_out[i+index];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), 32, 0);
#else
		// PKCS5_PBKDF2_HMAC(saved_key[index], strlen(saved_key[index]),
		//	cur_salt->salt, strlen((char*)cur_salt->salt),
		//	cur_salt->iterations, EVP_sha256(), 32, (unsigned char*)crypt_out[index]);
		pbkdf2_sha256((unsigned char *)saved_key[index], strlen(saved_key[index]),
			cur_salt->salt, strlen((char*)cur_salt->salt),
			cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0);
#endif
	}
	return count;
}

/* Quick scan: compare only the first machine word of each digest. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void django_set_key(char *key, int index)
{
	strcpy(saved_key[index], key);
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Tunable-cost report hook: expose the PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int)my_salt->iterations;
}

struct fmt_main fmt_django = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		django_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		django_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
lu.c
/*
 * In-place LU decomposition (Doolittle, no pivoting) of an n x n matrix A,
 * stored row-major with leading dimension lda.  On return, the strictly
 * lower triangle of A holds L (unit diagonal implied) and the upper
 * triangle holds U.
 *
 * Parallel scheme: "iron" rows i are distributed dynamically across
 * threads; each must wait until every earlier "hammer" row k < i has been
 * finalized.  Readiness is published through the row_ready semaphores.
 *
 * BUG FIX: the original raised row_ready[i] with no flush, so a consumer
 * could spin past the semaphore yet still read stale row data.  The row's
 * values are now flushed before the flag is raised, and the consumer
 * flushes again after leaving the spin loop before touching row k.
 */
void LU_decomp(int const n, int const lda, double* const A) {
  int i, j, k;

  // Semaphores: row_ready[k] != 0 once row k holds its final U values.
  char row_ready[n];
  for (i = 0; i < n; i++)
    row_ready[i] = 0;
  row_ready[0] = 1; // Row 0 is never modified, so it is ready immediately.

  // For all "iron" rows
#pragma omp parallel for private(j,k) schedule(dynamic,1)
  for (i = 1; i < n; i++) {
    double * const Ai = A + i*lda; // Pointer to row i

    // For all "hammer" rows
    for (k = 0; k < i; k++) {
      double * const Ak = A + k*lda; // Pointer to row k

      // Spin until "hammer" k is ready; flush forces a re-read of
      // row_ready from memory on every iteration.
      while (! row_ready[k]) {
#pragma omp flush
      }
      // Pair with the producer's pre-flag flush: make row k's finalized
      // values visible to this thread before they are consumed.
#pragma omp flush

      // Compute the scaling factor (and the element of L)
      Ai[k] /= Ak[k];

      // Hit "iron" row i with "hammer" row k
#pragma omp simd
      for (j = k + 1; j < n; j++)
        Ai[j] -= Ai[k]*Ak[j];
    }

    // Publish row i: flush the computed data BEFORE raising the flag so a
    // consumer that observes the flag also observes the row's values,
    // then flush the flag itself.
#pragma omp flush
    row_ready[i] = 1;
#pragma omp flush
  }
}
dbg.h
/*
 * @author Priyank Faldu <Priyank.Faldu@ed.ac.uk> <http://faldupriyank.com>
 *
 * Copyright 2019 The University of Edinburgh
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/*
 * Vertex-reordering ("relabeling") algorithms for Ligra graphs.  Each
 * generate*Mapping function fills new_ids with a permutation of
 * [0, GA.n): new_ids[v] is vertex v's new label.
 */
#include <vector>
#include <parallel/algorithm>
#include "ligra.h"
#include "pvector.h"

// (degree, vertex-id) pair used when ranking vertices by degree.
typedef std::pair<uintT, uintE> degree_nodeid_t;

extern int rand_gran;    // block granularity for the Random reordering
extern string map_file;  // path of the mapping file consumed by MAP modes

// Supported reordering algorithms.  The MAP_* variants load a
// precomputed mapping from map_file instead of computing one.
enum ReorderingAlgo {
    ORIGINAL = 0,
    Random = 1,
    Sort = 2,
    HubSort = 3,
    HubCluster = 4,
    DBG = 5,
    HubSortDBG = 6,
    HubClusterDBG = 7,
    MAP = 10, //TODO: MAP file format
    MAP_ORIGINAL = MAP,
    MAP_Sort = 12,
    MAP_HubSort = 13,
    MAP_HubCluster = 14,
    MAP_DBG = 15,
    MAP_HubSortDBG = 16,
    MAP_HubClusterDBG = 17
};

// Human-readable name for a ReorderingAlgo value; aborts on unknown input.
const string ReorderingAlgoStr(ReorderingAlgo type) {
    switch(type) {
        case HubSort: return "HubSort";
        case DBG: return "DBG";
        case HubClusterDBG: return "HubClusterDBG";
        case HubSortDBG: return "HubSortDBG";
        case HubCluster: return "HubCluster";
        case Random: return "Random-" + to_string(rand_gran);
        case ORIGINAL: return "Original";
        case Sort: return "Sort";
        case MAP: return "MAP";
        case MAP_HubSort: return "MAP_HubSort";
        case MAP_HubCluster: return "MAP_HubCluster";
        case MAP_DBG: return "MAP_DBG";
        case MAP_Sort: return "MAP_Sort";
        case MAP_HubSortDBG: return "MAP_HubSortDBG";
        case MAP_HubClusterDBG: return "MAP_HubClusterDBG";
        default:
            std::cout << "Unknown PreprocessTypeStr type: " << type << std::endl;
            abort();
    }
    assert(0);
    return "";
}

// Validates that new_ids is a permutation of [0, numVertices): copies the
// mapping, sorts it, and checks slot i holds value i.  Aborts if not.
// NOTE(review): declared to return int but has no return statement --
// flowing off the end of a non-void function is UB in C++; confirm intent
// and add a return value (e.g. return count == 0).
template <class vertex>
int verify_mapping(const graph<vertex>& GA, const pvector<uintE>& new_ids, long numVertices) {
    uintE* hist = newA(uintE, numVertices);
    {parallel_for(long i = 0 ; i < numVertices ; i++ ) {
        hist[i] = new_ids[i];
    }}
    __gnu_parallel::sort(&hist[0], &hist[numVertices]);
    uintE count = 0;
    {parallel_for(long i = 0 ; i < numVertices ; i++ ) {
        if ( hist[i] != i) {
            // Atomic increment: many parallel iterations may report mismatches.
            __sync_fetch_and_add(&count, 1);
        }
    }}
    if ( count != 0 ){
        std::cout << "Num of vertices did not match: " << count << std::endl;
        std::cout << "Mapping is invalid.!" << std::endl;
        abort();
    } else {
        std::cout << "Mapping is valid.!" << std::endl;
    }
    free(hist);
}

// Random reordering at rand_gran granularity: vertices are grouped into
// blocks of `granularity` consecutive ids, and whole blocks are shuffled.
// Any tail vertices beyond the last full block keep their original ids.
template <class vertex>
void generateRandomMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) {
    Timer t;
    t.Start();
    auto num_vertices = GA.n;
    auto num_edges = GA.m;
    uintE granularity = rand_gran;
    uintE slice = (num_vertices - granularity + 1) / granularity;
    uintE artificial_num_vertices = slice * granularity;
    assert(artificial_num_vertices <= num_vertices);
    pvector<uintE> slice_index;
    slice_index.resize(slice);
    {parallel_for(long i=0;i<slice;i++) {
        slice_index[i] = i;
    }}
    std::random_shuffle(slice_index.begin(), slice_index.end());
    {parallel_for(long i = 0 ; i < slice ; i++) {
        long new_index = slice_index[i] * granularity;
        for(long j = 0 ; j < granularity ; j++ ) {
            long v = (i * granularity) + j;
            if ( v < artificial_num_vertices) {
                new_ids[v] = new_index + j;
            }
        }
    }}
    // Tail vertices (not covered by a full slice) keep their original ids.
    for (long i = artificial_num_vertices ; i < num_vertices ; i++ ) {
        new_ids[i] = i;
    }
    slice_index.clear();
    t.Stop();
    t.PrintTime("Random Map Time", t.Seconds());
}

// HubSortDBG: hubs (degree > average) are sorted by degree and packed at
// the front of the id space; non-hubs keep their relative order within
// per-thread partitions, placed after all hubs.
// Assumes new_ids is pre-initialized to UINT_E_MAX (used as "unassigned").
template <class vertex>
void generateHubSortDBGMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) {
    Timer t;
    t.Start();
    auto numVertices = GA.n;
    auto numEdges = GA.m;
    vertex *origG = GA.V;
    uintT avgDegree = numEdges / numVertices;
    uintT hubCount {0};
    const int num_threads = omp_get_max_threads();
    // Per-thread collections of (degree, id) pairs for hub vertices.
    pvector<degree_nodeid_t> local_degree_id_pairs[num_threads];
    uintE slice = numVertices / num_threads;
    uintE start[num_threads];
    uintE end[num_threads];
    uintE hub_count[num_threads];
    uintE non_hub_count[num_threads];
    uintE new_index[num_threads];
    for ( int t = 0 ; t < num_threads ; t++ ) {
        start[t] = t * slice;
        end[t] = (t+1) * slice;
        hub_count[t] = 0;
    }
    end[num_threads-1] = numVertices; // last thread absorbs the remainder
    // Each thread scans its own contiguous vertex range for hubs.
#pragma omp parallel for schedule(static) num_threads(num_threads)
    for ( uintE t = 0 ; t < num_threads ; t++ ) {
        for (uintE v = start[t]; v < end[t]; ++v) {
            vertex vtx = origG[v];
            if (useOutdeg) {
                if (vtx.getOutDegree() > avgDegree) {
                    local_degree_id_pairs[t].push_back(std::make_pair(vtx.getOutDegree(), v));
                }
            } else {
                if (vtx.getInDegree() > avgDegree) {
                    local_degree_id_pairs[t].push_back(std::make_pair(vtx.getInDegree(), v));
                }
            }
        }
    }
    // Serial prefix bookkeeping: where each thread's non-hubs will start.
    for ( int t = 0 ; t < num_threads ; t++ ) {
        hub_count[t] = local_degree_id_pairs[t].size();
        hubCount += hub_count[t];
        non_hub_count[t] = end[t] - start[t] - hub_count[t];
    }
    new_index[0] = hubCount;
    for ( int t = 1 ; t < num_threads ; t++ ) {
        new_index[t] = new_index[t-1] + non_hub_count[t-1];
    }
    // Gather all hubs into one array and sort by descending degree.
    pvector<degree_nodeid_t> degree_id_pairs(hubCount);
    long k = 0;
    for ( int i = 0 ; i < num_threads ; i++ ) {
        for ( long j = 0 ; j < local_degree_id_pairs[i].size() ; j++ ) {
            degree_id_pairs[k++] = local_degree_id_pairs[i][j];
        }
        local_degree_id_pairs[i].clear();
    }
    assert(degree_id_pairs.size() == hubCount);
    assert(k == hubCount);
    __gnu_parallel::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_nodeid_t>());
    // Hubs take ids [0, hubCount) in degree order.
#pragma omp parallel for
    for (uintE n = 0; n < hubCount; ++n) {
        new_ids[degree_id_pairs[n].second] = n;
    }
    pvector<degree_nodeid_t>().swap(degree_id_pairs); // release memory
    // Non-hubs fill the remaining ids, preserving order inside each slice.
#pragma omp parallel for schedule(static) num_threads(num_threads)
    for ( uintE t = 0 ; t < num_threads ; t++ ) {
        for (uintE v = start[t]; v < end[t]; ++v) {
            if ( new_ids[v] == UINT_E_MAX ) {
                new_ids[v] = new_index[t]++;
            }
        }
    }
    t.Stop();
    t.PrintTime("HubSortDBG Map Time", t.Seconds());
}

// HubClusterDBG: two-bucket degree-based grouping (DBG with num_buckets=2,
// threshold = average degree).  Hot (high-degree) vertices are packed
// first; relative order within each thread's chunk is preserved.
template <class vertex>
void generateHubClusterDBGMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) {
    Timer t;
    t.Start();
    auto num_vertices = GA.n;
    auto num_edges = GA.m;
    vertex *origG = GA.V;
    uint32_t avg_vertex = num_edges / num_vertices;
    const int num_buckets = 2;
    avg_vertex = avg_vertex; // NOTE(review): self-assignment -- no effect; likely vestigial
    uint32_t bucket_threshold[] = {avg_vertex, static_cast<uint32_t>(-1)};
    vector<uint32_t> bucket_vertices[num_buckets]; // NOTE(review): appears unused
    const int num_threads = omp_get_max_threads();
    vector<uint32_t> local_buckets[num_threads][num_buckets];
    if ( useOutdeg ) {
        // This loop relies on a static scheduling: each thread owns a
        // contiguous chunk, so per-thread buckets keep original order.
#pragma omp parallel for schedule(static)
        for ( uint64_t i = 0 ; i < num_vertices ; i++ ) {
            for ( unsigned int j = 0 ; j < num_buckets ; j++ ) {
                const uintE& count = origG[i].getOutDegree();
                if ( count <= bucket_threshold[j] ) {
                    local_buckets[omp_get_thread_num()][j].push_back(i);
                    break;
                }
            }
        }
    } else {
#pragma omp parallel for schedule(static)
        for ( uint64_t i = 0 ; i < num_vertices ; i++ ) {
            for ( unsigned int j = 0 ; j < num_buckets ; j++ ) {
                const uintE& count = origG[i].getInDegree();
                if ( count <= bucket_threshold[j] ) {
                    local_buckets[omp_get_thread_num()][j].push_back(i);
                    break;
                }
            }
        }
    }
    // Serial exclusive scan over (bucket, thread) to compute id offsets;
    // highest bucket (largest degrees) is numbered first.
    int temp_k = 0;
    uint32_t start_k[num_threads][num_buckets];
    for ( int32_t j = num_buckets-1 ; j >= 0 ; j-- ) {
        for ( int t = 0 ; t < num_threads ; t++ ) {
            start_k[t][j] = temp_k;
            temp_k += local_buckets[t][j].size();
        }
    }
    // Assign new ids from each thread's buckets at the precomputed offsets.
#pragma omp parallel for schedule(static)
    for ( int t = 0 ; t < num_threads; t++ ) {
        for ( int32_t j = num_buckets-1 ; j >= 0 ; j-- ) {
            const vector<uint32_t>& current_bucket = local_buckets[t][j];
            int k = start_k[t][j];
            const size_t& size = current_bucket.size();
            for ( uint32_t i = 0 ; i < size ; i++ ) {
                new_ids[ current_bucket[i] ] = k++;
            }
        }
    }
    for ( uint64_t i = 0 ; i < num_threads ; i++ ) {
        for ( unsigned int j = 0 ; j < num_buckets ; j++ ) {
            local_buckets[i][j].clear();
        }
    }
    t.Stop();
    t.PrintTime("HubClusterDBG Map Time", t.Seconds());
}

// DBG (Degree-Based Grouping): vertices are classified into power-of-two
// degree buckets around the average; buckets are laid out from highest
// degree to lowest, preserving original order within each thread's chunk.
template <class vertex>
void generateDBGMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) {
    Timer t;
    t.Start();
    auto num_vertices = GA.n;
    auto num_edges = GA.m;
    vertex *origG = GA.V;
    uint32_t avg_vertex = num_edges / num_vertices;
    const uint32_t& av = avg_vertex;
    uint32_t bucket_threshold[] = {av/2, av, av*2, av*4, av*8, av*16, av*32, av*64, av*128, av*256, av*512, static_cast<uint32_t>(-1)};
    int num_buckets = 8;
    if ( num_buckets > 11 ) {
        // if you really want to increase the bucket count, add more thresholds to the bucket_threshold above.
        std::cout << "Unsupported bucket size: " << num_buckets << std::endl;
        assert(0);
    }
    bucket_threshold[num_buckets-1] = static_cast<uint32_t>(-1); // last bucket catches everything
    vector<uint32_t> bucket_vertices[num_buckets]; // NOTE(review): appears unused
    const int num_threads = omp_get_max_threads();
    vector<uint32_t> local_buckets[num_threads][num_buckets];
    if ( useOutdeg ) {
        // This loop relies on a static scheduling (contiguous per-thread
        // chunks keep original vertex order inside each local bucket).
#pragma omp parallel for schedule(static)
        for ( uint64_t i = 0 ; i < num_vertices ; i++ ) {
            for ( unsigned int j = 0 ; j < num_buckets ; j++ ) {
                const uintE& count = origG[i].getOutDegree();
                if ( count <= bucket_threshold[j] ) {
                    local_buckets[omp_get_thread_num()][j].push_back(i);
                    break;
                }
            }
        }
    } else {
#pragma omp parallel for schedule(static)
        for ( uint64_t i = 0 ; i < num_vertices ; i++ ) {
            for ( unsigned int j = 0 ; j < num_buckets ; j++ ) {
                const uintE& count = origG[i].getInDegree();
                if ( count <= bucket_threshold[j] ) {
                    local_buckets[omp_get_thread_num()][j].push_back(i);
                    break;
                }
            }
        }
    }
    // Serial exclusive scan over (bucket, thread) for starting offsets.
    int temp_k = 0;
    uint32_t start_k[num_threads][num_buckets];
    for ( int32_t j = num_buckets-1 ; j >= 0 ; j-- ) {
        for ( int t = 0 ; t < num_threads ; t++ ) {
            start_k[t][j] = temp_k;
            temp_k += local_buckets[t][j].size();
        }
    }
#pragma omp parallel for schedule(static)
    for ( int t = 0 ; t < num_threads; t++ ) {
        for ( int32_t j = num_buckets-1 ; j >= 0 ; j-- ) {
            const vector<uint32_t>& current_bucket = local_buckets[t][j];
            int k = start_k[t][j];
            const size_t& size = current_bucket.size();
            for ( uint32_t i = 0 ; i < size ; i++ ) {
                new_ids[ current_bucket[i] ] = k++;
            }
        }
    }
    for ( uint64_t i = 0 ; i < num_threads ; i++ ) {
        for ( unsigned int j = 0 ; j < num_buckets ; j++ ) {
            local_buckets[i][j].clear();
        }
    }
    t.Stop();
    t.PrintTime("DBG Map Time", t.Seconds());
}

// Sort: full descending-degree sort; vertex with the highest degree gets
// id 0.  Ties are broken by the pair's second element (vertex id).
template <class vertex>
void generateSortMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) {
    Timer t;
    t.Start();
    auto numVertices = GA.n;
    auto numEdges = GA.m;
    vertex *origG = GA.V;
    pvector<degree_nodeid_t> degree_id_pairs(numVertices);
    if (useOutdeg) {
#pragma omp parallel for
        for (uintE v = 0; v < numVertices; ++v) {
            degree_id_pairs[v] = std::make_pair(origG[v].getOutDegree(), v);
        }
    } else {
#pragma omp parallel for
        for (uintE v = 0; v < numVertices; ++v) {
            degree_id_pairs[v] = std::make_pair(origG[v].getInDegree(), v);
        }
    }
    __gnu_parallel::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_nodeid_t>());
#pragma omp parallel for
    for (uintE n = 0; n < numVertices; ++n) {
        new_ids[degree_id_pairs[n].second] = n;
    }
    pvector<degree_nodeid_t>().swap(degree_id_pairs); // release memory
    t.Stop();
    t.PrintTime("Sort Map Time", t.Seconds());
}

//Supported format is text file
//line1: V (number of vertices)
//line2: E (number of edges)
//line3+i: a pair of IDs i and i' separated by space. (where i is from 0 to V-1)
//
//Example map file with 3 vertices and 4 edges may look like below.
//3
//4
//0 2
//1 0
//2 1
// Loads a precomputed mapping from map_file into new_ids.  Exits on
// vertex-count mismatch; edge-count mismatch is only a warning.
template <class vertex>
void LoadMappingFromFile(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) {
    Timer t;
    t.Start();
    auto num_vertex = GA.n;
    auto num_edges = GA.m;
    ifstream ifs(map_file.c_str(), std::ifstream::in);
    if (!ifs.good()) {
        cout << "File " << map_file << " does not exist!" << endl;
        exit(-1);
    }
    unsigned long int num_vertex_1, num_edges_1;
    ifs >> num_vertex_1;
    ifs >> num_edges_1;
    cout << " num_vertex: " << num_vertex_1 << " num_edges: " << num_edges_1 << endl;
    cout << " num_vertex: " << num_vertex << " num_edges: " << num_edges << endl;
    if ( num_vertex != num_vertex_1 ) {
        cout << "Mismatch: " << num_vertex << " " << num_vertex_1 << endl;
        exit (-1);
    }
    if ( num_vertex != new_ids.size() ) {
        cout << "Mismatch: " << num_vertex << " " << new_ids.size() << endl;
        exit (-1);
    }
    if ( num_edges != num_edges_1 ) {
        cout << "Warning! Potential mismatch: " << num_edges << " " << num_edges_1 << endl;
    }
    char c;
    unsigned long int st, v;
    // tab==true selects the space/tab-separated "st v" format; the else
    // branch parses a punctuated "(st, v)"-style format.
    bool tab = true;
    if ( tab ) {
        for ( unsigned int i = 0 ; i < num_vertex ; i++ ) {
            ifs >> st >> v;
            new_ids[st] = v;
        }
    } else {
        for ( unsigned int i = 0 ; i < num_vertex ; i++ ) {
            ifs >> c >> st >> c >> v >> c;
            new_ids[st] = v;
        }
    }
    ifs.close();
    t.Stop();
    t.PrintTime("Load Map Time", t.Seconds());
}

// Dispatcher: computes (or loads) the mapping selected by reordering_algo.
// generateHubSortMapping / generateHubClusterMapping are declared
// elsewhere in the project.  ORIGINAL must be handled by the caller.
template <class vertex>
void generateMapping(const graph<vertex>& GA, ReorderingAlgo reordering_algo, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) {
    switch(reordering_algo) {
        case HubSort:
            generateHubSortMapping(GA, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite);
            break;
        case Sort:
            generateSortMapping(GA, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite);
            break;
        case DBG:
            generateDBGMapping(GA, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite);
            break;
        case HubSortDBG:
            generateHubSortDBGMapping(GA, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite);
            break;
        case HubClusterDBG:
            generateHubClusterDBGMapping(GA, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite);
            break;
        case HubCluster:
            generateHubClusterMapping(GA, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite);
            break;
        case Random:
            generateRandomMapping(GA, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite);
            break;
        case MAP:
            LoadMappingFromFile(GA, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite);
            break;
        case ORIGINAL:
            std::cout << "Should not be here!" << std::endl;
            abort();
            return;
            break;
        default:
            std::cout << "Unknown generateMapping type: " << reordering_algo << std::endl;
            abort();
    }
#ifdef _DEBUG
    // Debug builds validate the permutation and then deliberately exit.
    verify_mapping(GA, new_ids, GA.n);
    exit(-1);
#endif
}
5194.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop { /* E := A*B */ #pragma omp parallel for simd schedule(static, 28) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp parallel for simd schedule(static, 28) for (i = 0; i < _PB_NJ; i++) { for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp parallel for simd schedule(static, 28) for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
hsrun.c
#include <limits.h> #include <stdio.h> #include <errno.h> #include <string.h> #include <stdbool.h> #include <omp.h> #include "hs.h" #include "hs_compile_mnrl.h" #include "ht.h" #include "read_input.h" typedef struct run_ctx_t { r_map *report_map; hs_database_t *database; hs_scratch_t *scratch; char *inputData; size_t length; unsigned int inp_off; //for the location in filename list unsigned int db_off; // for the location in filename list unsigned int *counts; // for support values, if used } run_ctx; /** * This is the function that will be called for each match that occurs. @a ctx * is to allow you to have some application-specific state that you will get * access to for each match. In our simple example we're just going to use it * to pass in the pattern that was being searched for so we can print it out. */ static int eventHandler(unsigned int id, unsigned long long from, unsigned long long to, unsigned int flags, void *ctx) { r_map *report_map = (r_map *) ctx; r_map *m = find_mapping(id, &report_map); if(m == NULL) { printf("couldn't find mapping: %u\n", id); return 1; } printf("Match at id::code::offset %s::%s::%llu\n", m->name, m->report, to); return 0; } static int supportEventHandler(unsigned int id, unsigned long long from, unsigned long long to, unsigned int flags, void *ctx) { unsigned int *support = (unsigned int *) ctx; //#pragma omp atomic support[id]++; return 0; } static void usage(char *prog) { fprintf(stderr, "Usage: %s [-t NUM_TREADS] [--support] <hs databases> <input files>\n", prog); fprintf(stderr, " -t NUM_THREADS use no more than NUM_THREADS threads\n"); fprintf(stderr, " --support enable aggregation of reports; print final counts\n"); } int main(int argc, char *argv[]) { if (argc < 3) { usage(argv[0]); return 1; } unsigned int num_dbs = 0; unsigned int num_inputs = 0; bool support = false; int num_threads = 0; char *db_fns[argc]; char *input_fns[argc]; for ( int i=1; i<argc; i++ ) { if ( strcmp("-t", argv[i]) == 0 ) { // setting number of 
threads if ( i+1 <= argc ) { i++; num_threads = atoi(argv[i]); } else { usage(argv[0]); return 44; } continue; } if ( strcmp("--support", argv[i]) == 0 ) { // turn on support reporting support = true; continue; } size_t len = strlen(argv[i]); if ( len < 4 ) { input_fns[num_inputs] = argv[i]; num_inputs += 1; } else { if ( argv[i][len-1] == 's' && argv[i][len-2] == 'h' && argv[i][len-3] == '.' ) { db_fns[num_dbs] = argv[i]; num_dbs += 1; } else { input_fns[num_inputs] = argv[i]; num_inputs += 1; } } } if ( num_dbs == 0 || num_inputs == 0 ) { usage(argv[0]); return 45; } run_ctx contexts[num_dbs*num_inputs]; size_t inputs_length[num_inputs]; // for cleanup hs_database_t *dbs_to_delete[num_dbs]; char *inputs_to_delete[num_inputs]; r_map *rmaps_to_delete[num_dbs]; unsigned int *supports_to_delete[num_dbs]; //loop through the inputs to get input data for ( int j=0; j < num_inputs; j++ ) { char *inputFN = input_fns[j]; size_t length; /* Next, we read the input data file into a buffer. */ char *inputData; inputData = readInputData(inputFN, &length); if (!inputData) { fprintf(stderr, "ERROR: Unable to read input data '%s'. 
Exiting.\n", inputFN); //hs_free_database(database); for( int i=0; i<j; i++){ free(inputs_to_delete[i]); } return 4; } inputs_to_delete[j] = inputData; inputs_length[j] = length; } // loop through the dbs for ( int i=0; i < num_dbs; i++ ) { char *hsDB = db_fns[i]; // First, read in the database size_t length; char *hsSerDB; char *hsmSer; hsmSer = readInputData(hsDB, &length); if(!hsmSer) { return 2; } // extract the mapping r_map *report_map = NULL; size_t map_length; unserialize_mapping (hsmSer, &map_length, &report_map); rmaps_to_delete[i] = report_map; // redo the database pointer hsSerDB = hsmSer + map_length ; length -= map_length ; // Next, we try to deserialize hs_database_t *database; hs_compile_error_t *compile_err; if(hs_deserialize_database(hsSerDB, length, &database) != HS_SUCCESS) { fprintf(stderr, "ERROR: Unable to load HyperScan database file \"%s\": %s. Exiting.\n", hsDB, compile_err->message); free(hsmSer); delete_all(&report_map); hs_free_compile_error(compile_err); // deallocate inputs for(int j=0; j<num_inputs; j++) { free(inputs_to_delete[j]); } for(int j=0; j<i; j++) { // deallocate previous databases free(dbs_to_delete[j]); if(support){ free(supports_to_delete[j]); } // delete report map delete_all(&(rmaps_to_delete[j])); // kill off all the scratch space that was stored previously for(int k=0; k<num_inputs; k++) { free(contexts[j*num_inputs+k].scratch); } } return 3; } // keep track of the database dbs_to_delete[i] = database; // make a support array if needed if(support) { supports_to_delete[i] = (unsigned int*) malloc(sizeof(unsigned int) * count_mapping(&report_map)); } else { supports_to_delete[i] = NULL; } //printf("Allocating scratch...\n"); hs_scratch_t *db_scratch = NULL; if (hs_alloc_scratch(database, &db_scratch) != HS_SUCCESS) { fprintf(stderr, "ERROR: Unable to allocate scratch space for database '%s'. 
Exiting.\n", hsDB); free(hsmSer); delete_all(&report_map); hs_free_database(database); // deallocate inputs for(int j=0; j<num_inputs; j++) { free(inputs_to_delete[j]); } if(support) { free(supports_to_delete[i]); } for(int j=0; j<i; j++) { // deallocate previous databases free(dbs_to_delete[j]); if(support) { free(supports_to_delete[j]); } // delete report map delete_all(&(rmaps_to_delete[j])); // kill off all the scratch space that was stored previously for(int k=0; k<num_inputs; k++) { free(contexts[j*num_inputs+k].scratch); } } return 5; } //loop through the inputs for ( int j=0; j < num_inputs; j++ ) { char *inputData; inputData = inputs_to_delete[j]; /* Finally, we issue a call to hs_scan, which will search the input buffer * for the pattern represented in the bytecode. Note that in order to do * this, scratch space needs to be allocated with the hs_alloc_scratch * function. In typical usage, you would reuse this scratch space for many * calls to hs_scan, but as we're only doing one, we'll be allocating it * and deallocating it as soon as our matching is done. * * When matches occur, the specified callback function (eventHandler in * this file) will be called. Note that although it is reminiscent of * asynchronous APIs, Hyperscan operates synchronously: all matches will be * found, and all callbacks issued, *before* hs_scan returns. * * In this example, we provide the input pattern as the context pointer so * that the callback is able to print out the pattern that matched on each * match event. */ hs_scratch_t *scratch = NULL; if ( hs_clone_scratch(db_scratch, &scratch) != HS_SUCCESS ) { printf("ERROR: Unable to allocate cloned scratch space. 
Exiting.\n"); free(inputData); free(hsmSer); delete_all(&report_map); hs_free_database(database); // deallocate inputs for(int j=0; j<num_inputs; j++) { free(inputs_to_delete[j]); } if(support) { free(supports_to_delete[i]); } for(int j=0; j<i; j++) { // deallocate previous databases free(dbs_to_delete[j]); if(support) { free(supports_to_delete[j]); } // delete report map delete_all(&(rmaps_to_delete[j])); // kill off all the scratch space that was stored previously for(int k=0; k<num_inputs; k++) { free(contexts[j*num_inputs+k].scratch); } } return 7; } /* Store all of the context information */ contexts[i*num_inputs+j].report_map = report_map; contexts[i*num_inputs+j].database = database; contexts[i*num_inputs+j].scratch = scratch; contexts[i*num_inputs+j].inputData = inputData; contexts[i*num_inputs+j].length = inputs_length[j]; contexts[i*num_inputs+j].db_off = i; contexts[i*num_inputs+j].inp_off = j; contexts[i*num_inputs+j].counts = supports_to_delete[i]; /* Scanning is complete, any matches have been handled, so now we just * clean up and exit. 
*/ } // input loop free(hsmSer); // hs_free_database(database); // free up db_scratch hs_free_scratch(db_scratch); } // database loop //printf("Simulating graph on input data with Hyperscan...\n"); //okay do the scanning if(num_threads > 0) { omp_set_dynamic(1); omp_set_num_threads(num_threads); } //#pragma omp parallel for for ( int i=0; i<num_inputs*num_dbs; i++ ) { run_ctx ctx = contexts[i]; if(!support) { // scan each input and report runtime if (hs_scan(ctx.database, ctx.inputData, ctx.length, 0, ctx.scratch, eventHandler, ctx.report_map) != HS_SUCCESS) { fprintf(stderr, "ERROR: Unable to scan input buffer '%s' with database '%s'.\n", input_fns[ctx.inp_off], db_fns[ctx.db_off]); /* * No need to stop, just keep trying hs_free_scratch(scratch); free(inputData); free(hsmSer); hs_free_database(database); return 6; */ } } else { if (hs_scan(ctx.database, ctx.inputData, ctx.length, 0, ctx.scratch, supportEventHandler, ctx.counts) != HS_SUCCESS) { fprintf(stderr, "ERROR: Unable to scan input buffer '%s' with database '%s'.\n", input_fns[ctx.inp_off], db_fns[ctx.db_off]); } } } // print out supports if(support) { printf("File, ID, Report ID, Count\n"); for ( int i=0; i<num_dbs; i++ ) { r_map *mapping = rmaps_to_delete[i]; for ( int j=0; j<count_mapping(&mapping); j++) { r_map *m = find_mapping(j, &mapping); printf("%s, %s, %s, %u\n", db_fns[i], m->name, m->report, supports_to_delete[i][j]); } } } // cleanup for ( int i=0; i<num_inputs*num_dbs; i++ ) { run_ctx ctx = contexts[i]; hs_free_scratch(ctx.scratch); } for ( int i=0; i<num_dbs; i++ ) { free(dbs_to_delete[i]); delete_all(&(rmaps_to_delete[i])); free(supports_to_delete[i]); } for ( int i=0; i<num_inputs; i++ ) { free(inputs_to_delete[i]); } return 0; }
task_in_joinbarrier.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
//
// OMPT test: verifies the task frame addresses (exit_frame / reenter_frame)
// reported for an explicit task that is picked up and executed by the second
// thread while it waits in the implicit (join) barrier of the parallel
// region.  The master creates the task and then blocks in OMPT_WAIT until
// the worker has signalled that it started executing it.
//
// NOTE: every "// CHECK"/"// CHECK-NOT" line below is a FileCheck directive
// matched against the runtime's callback output; they are part of the test's
// expected behavior and must not be edited independently of the runtime.
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>

int main() {
  int condition=0;
  omp_set_nested(0);
  print_frame(0);
#pragma omp parallel num_threads(2)
  {
    print_frame_from_outlined_fn(1);
    print_ids(0);
    print_ids(1);
    print_frame(0);
// The master thread creates the task; the worker executes it in the barrier.
#pragma omp master
    {
      print_ids(0);
#pragma omp task shared(condition)
      {
        OMPT_SIGNAL(condition);
        print_frame(1);
        print_ids(0);
        print_ids(1);
        print_ids(2);
      }
      // Block until the worker has started the task, so the task is
      // guaranteed to run inside the join barrier, not inline.
      OMPT_WAIT(condition,1);
      print_ids(0);
    }
    print_ids(0);
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

  // make sure initial data pointers are null
  // CHECK-NOT: 0: new_task_data initially not null

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[MAIN_REENTER]], parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]

  // nested parallel masters
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // <- ompt_event_task_create would be expected here
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=[[REENTER]], new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[TASK_FUNCTION:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]

  // implicit barrier parallel
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]

  // implicit barrier parallel
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
matrix.c
#include <omp.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <matrix/matrix.h> int mtx_init(struct mtx* const m, size_t rows, size_t columns, mpfr_prec_t prec) { m->nrows = rows; m->ncols = columns; m->storage = (mpfr_t*) malloc(sizeof(mpfr_t) * m->nrows * m->ncols); if (NULL == m->storage) { return -1; } size_t i, j; #pragma omp parallel for private(i, j) schedule(static) for(i = 0; i < m->nrows; ++i) { for(j = 0; j < m->ncols; ++j) { mpfr_init2(*(m->storage + i * m->ncols + j), prec); } } return 0; } int mtx_clear(struct mtx const m) { size_t i, j; #pragma omp parallel for private(i, j) schedule(static) for(i = 0; i < m.nrows; ++i) { for(j = 0; j < m.ncols; ++j) { mpfr_clear(*(m.storage + i * m.ncols + j)); } } free(m.storage); return 0; } int mtx_fprint(FILE* stream, struct mtx const m) { int total = 0; int chars = 0; size_t i, j; for(i = 0; i < m.nrows; ++i) { for(j = 0; j < m.ncols; ++j) { chars = mpfr_fprintf(stream, "%Rf ", *(m.storage + i * m.ncols + j)); if (chars < 0) { return 0; } total += chars; } chars = fprintf(stream, "\n"); if (chars < 0) { return 0; } total += chars; } return total; } int mtx_fscan(FILE* stream, struct mtx m, char const* delim) { char buf[LINE_MAX]; size_t i, j; for (i = 0; i < m.nrows; ++i) { fgets(buf, LINE_MAX, stream); char* token = (char*) strtok(buf, delim); for (j = 0; j < m.ncols; ++j) { if (NULL == token) return -1; double d = atof(token); mpfr_t* const ptr = m.storage + i * m.ncols + j; mpfr_set_d(*ptr, d, MPFR_RNDN); token = (char*) strtok(NULL, delim); } } return 0; } int mtx_fill(struct mtx m, mpfr_t val, mpfr_t diagval) { size_t i, j; #pragma omp parallel for shared(m) private(i, j) schedule(static) for (i = 0; i < m.nrows; ++i) { for (j = 0; j < m.ncols; ++j) { mpfr_t* const ptr = m.storage + i * m.ncols + j; if (m.nrows == m.ncols) { if (i == j) { mpfr_set(*ptr, diagval, MPFR_RNDN); } else { mpfr_set(*ptr, val, MPFR_RNDN); } } else { mpfr_set(*ptr, val, MPFR_RNDN); } } } return 0; } int 
mtx_fill_d(struct mtx m, double val, double diagval) { size_t i, j; #pragma omp parallel for shared(m) private(i, j) schedule(static) for (i = 0; i < m.nrows; ++i) { for (j = 0; j < m.ncols; ++j) { mpfr_t* const ptr = m.storage + i * m.ncols + j; if (m.nrows == m.ncols) { if (i == j) { mpfr_set_d(*ptr, diagval, MPFR_RNDN); } else { mpfr_set_d(*ptr, val, MPFR_RNDN); } } else { mpfr_set_d(*ptr, val, MPFR_RNDN); } } } return 0; } int mtx_copy(struct mtx rop, struct mtx const op) { if (rop.nrows != op.nrows || rop.ncols != op.ncols) return -1; size_t i, j; #pragma omp parallel for shared(rop) private(i, j) schedule(static) for (i = 0; i < rop.nrows; ++i) { for (j = 0; j < rop.ncols; ++j) { mpfr_t* const rptr = rop.storage + i * rop.ncols + j; mpfr_t* const optr = op.storage + i * op.ncols + j; mpfr_set(*rptr, *optr, MPFR_RNDN); } } return 0; } int mtx_mul(struct mtx rop, struct mtx const op1, struct mtx const op2) { if (rop.nrows != op1.nrows || rop.ncols != op2.ncols) return -1; if (op1.ncols != op2.nrows) return -1; mpfr_prec_t const prec = mpfr_get_prec(*rop.storage); size_t i, j, k; #pragma omp parallel for shared(rop) private(i, j, k) schedule(static) for (i = 0; i < op1.nrows; ++i) { for (j = 0; j < op2.ncols; ++j) { mpfr_t* const prop = rop.storage + i * rop.ncols + j; mpfr_set_ui(*prop, 0, MPFR_RNDN); for (k = 0; k < op1.ncols; ++k) { mpfr_t* const pop1 = op1.storage + i * op1.ncols + k; mpfr_t* const pop2 = op2.storage + k * op2.ncols + j; mpfr_t tmp; mpfr_init2(tmp, prec); mpfr_mul(tmp, *pop1, *pop2, MPFR_RNDN); mpfr_add(*prop, *prop, tmp, MPFR_RNDN); mpfr_clear(tmp); } } } return 0; } int mtx_mulval(struct mtx rop, struct mtx const op1, mpfr_t op2) { if (rop.nrows != op1.nrows || rop.ncols != op1.ncols) { return -1; } size_t i, j; #pragma omp parallel for shared(rop) private(i, j) schedule(static) for (i = 0; i < op1.nrows; ++i) { for (j = 0; j < op1.ncols; ++j) { mpfr_t* const prop = rop.storage + i * rop.ncols + j; mpfr_t* const pop1 = op1.storage + i * 
op1.ncols + j; mpfr_mul(*prop, *pop1, op2, MPFR_RNDN); } } return 0; } int mtx_add(struct mtx rop, struct mtx const op1, struct mtx const op2) { if (op1.nrows != op2.nrows || op1.ncols != op2.ncols) { return -1; } if (rop.nrows != op1.nrows || rop.ncols != op1.ncols) { return -1; } size_t i, j; #pragma omp parallel for shared(rop) private(i, j) schedule(static) for (i = 0; i < rop.nrows; ++i) { for (j = 0; j < rop.ncols; ++j) { mpfr_add(*(rop.storage + i * rop.ncols + j), *(op1.storage + i * op1.ncols + j), *(op2.storage + i * op2.ncols + j), MPFR_RNDN); } } return 0; } int mtx_tr(struct mtx rop, struct mtx const op) { if (rop.nrows != op.ncols || rop.ncols != op.nrows) { return -1; } size_t i, j; #pragma omp parallel for shared(rop) private(i, j) schedule(static) for (i = 0; i < rop.nrows; ++i) { for (j = 0; j < rop.ncols; ++j) { mpfr_set(*(rop.storage + i * rop.ncols + j), *(op.storage + j * op.ncols + i), MPFR_RNDN); } } return 0; }
raytracer.h
#pragma once #include "resource.h" #include <iostream> #include <linalg.h> #include <memory> #include <omp.h> #include <random> using namespace linalg::aliases; namespace cg::renderer { struct ray { ray(float3 position, float3 direction) : position(position) { this->direction = normalize(direction); } float3 position; float3 direction; }; struct payload { float t; float3 bary; cg::color color; }; template<typename VB> struct triangle { triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c); float3 a; float3 b; float3 c; float3 ba; float3 ca; float3 na; float3 nb; float3 nc; float3 ambient; float3 diffuse; float3 emissive; }; template<typename VB> inline triangle<VB>::triangle( const VB& vertex_a, const VB& vertex_b, const VB& vertex_c) { a = float3{vertex_a.x, vertex_a.y, vertex_a.z}; b = float3{vertex_b.x, vertex_b.y, vertex_b.z}; c = float3{vertex_c.x, vertex_c.y, vertex_c.z}; ba = b - a; ca = c - a; na = float3{vertex_a.nx, vertex_a.ny, vertex_a.nz}; nb = float3{vertex_b.nx, vertex_b.ny, vertex_b.nz}; nc = float3{vertex_c.nx, vertex_c.ny, vertex_c.nz}; ambient = {vertex_a.ambient_r, vertex_a.ambient_g, vertex_a.ambient_b}; diffuse = {vertex_a.diffuse_r, vertex_a.diffuse_g, vertex_a.diffuse_b}; emissive = {vertex_a.emissive_r, vertex_a.emissive_g, vertex_a.emissive_b}; } template<typename VB> class aabb { public: void add_triangle(const triangle<VB> triangle); const std::vector<triangle<VB>>& get_triangles() const; bool aabb_test(const ray& ray) const; protected: std::vector<triangle<VB>> triangles; float3 aabb_min; float3 aabb_max; }; struct light { float3 position; float3 color; }; template<typename VB, typename RT> class raytracer { public: raytracer(){}; ~raytracer(){}; void set_render_target(std::shared_ptr<resource<RT>> in_render_target); void clear_render_target(const RT& in_clear_value); void set_viewport(size_t in_width, size_t in_height); void set_vertex_buffers(std::vector<std::shared_ptr<cg::resource<VB>>> in_vertex_buffers); void 
set_index_buffers(std::vector<std::shared_ptr<cg::resource<unsigned int>>> in_index_buffers); void build_acceleration_structure(); std::vector<aabb<VB>> acceleration_structures; void ray_generation(float3 position, float3 direction, float3 right, float3 up, size_t depth, size_t accumulation_num); payload trace_ray(const ray& ray, size_t depth, float max_t = 1000.f, float min_t = 0.001f) const; payload intersection_shader(const triangle<VB>& triangle, const ray& ray) const; std::function<payload(const ray& ray)> miss_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle, size_t depth)> closest_hit_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> any_hit_shader = nullptr; float2 get_jitter(int frame_id); protected: std::shared_ptr<cg::resource<RT>> render_target; std::shared_ptr<cg::resource<float3>> history; std::vector<std::shared_ptr<cg::resource<unsigned int>>> index_buffers; std::vector<std::shared_ptr<cg::resource<VB>>> vertex_buffers; size_t width = 1920; size_t height = 1080; }; template<typename VB, typename RT> inline void raytracer<VB, RT>::set_render_target( std::shared_ptr<resource<RT>> in_render_target) { render_target = in_render_target; } template<typename VB, typename RT> inline void raytracer<VB, RT>::clear_render_target(const RT& in_clear_value) { for (size_t i = 0; i < width * height; ++i) { render_target->item(i) = in_clear_value; if (history) { history->item(i) = float3{0.0f, 0.0f, 0.0f};// AMGOUS. 
} } } template<typename VB, typename RT> void raytracer<VB, RT>::set_index_buffers(std::vector<std::shared_ptr<cg::resource<unsigned int>>> in_index_buffers) { index_buffers = in_index_buffers; } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_vertex_buffers(std::vector<std::shared_ptr<cg::resource<VB>>> in_vertex_buffers) { vertex_buffers = in_vertex_buffers; } template<typename VB, typename RT> inline void raytracer<VB, RT>::build_acceleration_structure() { for (size_t shape_id = 0; shape_id < index_buffers.size(); ++shape_id) { auto& index_buffer = index_buffers[shape_id]; auto& vertex_buffer = vertex_buffers[shape_id]; size_t index_id = 0; aabb<VB> aabb; while (index_id < index_buffer->get_number_of_elements()) { triangle<VB> triangle( vertex_buffer->item(index_buffer->item(index_id++)), vertex_buffer->item(index_buffer->item(index_id++)), vertex_buffer->item(index_buffer->item(index_id++))); aabb.add_triangle(triangle); } acceleration_structures.push_back(aabb); } } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_viewport(size_t in_width, size_t in_height) { width = in_width; height = in_height; history = std::make_shared<cg::resource<float3>>(width, height); } template<typename VB, typename RT> inline void raytracer<VB, RT>::ray_generation( float3 position, float3 direction, float3 right, float3 up, size_t depth, size_t accumulation_num) { float frame_weight = 1.0f / static_cast<float>(accumulation_num); for (size_t frame_id = 0; frame_id < accumulation_num; ++frame_id) { float2 jitter = get_jitter(static_cast<int>(frame_id)); for (int x = 0; x < static_cast<int>(width); ++x) { #pragma omp parallel for for (int y = 0; y < static_cast<int>(height); ++y) { float u = (2.0f * x + jitter.x) / static_cast<float>(width - 1) - 1.0f; float v = (2.0f * y + jitter.y) / static_cast<float>(height - 1) - 1.0f; u *= static_cast<float>(width) / static_cast<float>(height); float3 ray_direction = direction + u * right - v * up; ray 
ray(position, ray_direction); payload payload = trace_ray(ray, depth); auto& history_pixel = history->item(x, y); history_pixel += sqrt(frame_weight * float3{ payload.color.r, payload.color.g, payload.color.b, }); render_target->item(x, y) = RT::from_float3(history_pixel); } } } } template<typename VB, typename RT> inline payload raytracer<VB, RT>::trace_ray( const ray& ray, size_t depth, float max_t, float min_t) const { if (depth == 0) { return miss_shader(ray); } --depth; payload closest_hit_payload = {}; closest_hit_payload.t = max_t; const triangle<VB>* closest_triangle = nullptr; for (auto& aabb: acceleration_structures) { if (!aabb.aabb_test(ray)) { continue; } for (auto& triangle: aabb.get_triangles()) { payload payload = intersection_shader(triangle, ray); if (payload.t > min_t && payload.t < closest_hit_payload.t) { closest_hit_payload = payload; closest_triangle = &triangle; if (any_hit_shader) { return any_hit_shader(ray, payload, triangle); } } } } if ((closest_hit_payload.t < max_t) && closest_hit_shader) { return closest_hit_shader( ray, closest_hit_payload, *closest_triangle, depth); } return miss_shader(ray); } template<typename VB, typename RT> inline payload raytracer<VB, RT>::intersection_shader( const triangle<VB>& triangle, const ray& ray) const { payload payload{}; payload.t = -1.0f; float3 pvec = cross(ray.direction, triangle.ca); float det = dot(triangle.ba, pvec); if (-1e-8 < det && det < 1e-8) { return payload; } float inv_det = 1.0f / det; float3 tvec = ray.position - triangle.a; float u = dot(tvec, pvec) * inv_det; if (u < 0.0f || u > 1.0f) { return payload; } float3 qvec = cross(tvec, triangle.ba); float v = dot(ray.direction, qvec) * inv_det; if (v < 0.0f || u + v > 1.0f) { return payload; } payload.t = dot(triangle.ca, qvec) * inv_det; payload.bary = float3{1.0f - u - v, u, v}; return payload; } template<typename VB, typename RT> float2 raytracer<VB, RT>::get_jitter(int frame_id) { float2 result{0.0f, 0.0f}; constexpr int base_x = 2; 
constexpr float inverted_base_x = 1.0f / base_x; int index_x = frame_id + 1; float fraction_x = inverted_base_x; while (index_x > 0) { result.x += (index_x % base_x) * fraction_x; index_x /= base_x; fraction_x *= inverted_base_x; } constexpr int base_y = 3; constexpr float inverted_base_y = 1.0f / base_y; int index_y = frame_id + 1; float fraction_y = inverted_base_y; while (index_y > 0) { result.y += (index_y % base_y) * fraction_y; index_y /= base_y; fraction_y *= inverted_base_y; } return result - 0.5f; } template<typename VB> inline void aabb<VB>::add_triangle(const triangle<VB> triangle) { if (triangles.empty()) { aabb_max = triangle.a; aabb_min = triangle.a; } triangles.push_back(triangle); aabb_min = min(aabb_min, triangle.a); aabb_min = min(aabb_min, triangle.b); aabb_min = min(aabb_min, triangle.c); aabb_max = max(aabb_max, triangle.a); aabb_max = max(aabb_max, triangle.b); aabb_max = max(aabb_max, triangle.c); } template<typename VB> inline const std::vector<triangle<VB>>& aabb<VB>::get_triangles() const { return triangles; } template<typename VB> inline bool aabb<VB>::aabb_test(const ray& ray) const { float3 inverted_ray_direction = float3(1.0f) / ray.direction; float3 t0 = (aabb_max - ray.position) * inverted_ray_direction; float3 t1 = (aabb_min - ray.position) * inverted_ray_direction; float3 tmax = max(t0, t1); float3 tmin = min(t0, t1); return maxelem(tmin) <= minelem(tmax); } }// namespace cg::renderer
contact_residualbased_block_builder_and_solver.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Vicente Mataix Ferrandiz
//
//

#if !defined(KRATOS_CONTACT_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_CONTACT_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER

/* System includes */

/* External includes */

/* Project includes */
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"

namespace Kratos
{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class ContactResidualBasedBlockBuilderAndSolver
 * @ingroup ContactStructuralMechanicsApplication
 * @brief Current class provides an implementation for contact builder and solving operations.
 * @details The RHS is constituted by the unbalanced loads (residual).
 * Degrees of freedom are reordered putting the restrained degrees of freedom at
 * the end of the system ordered in reverse order with respect to the DofSet.
 * Imposition of the dirichlet conditions is naturally dealt with as the residual
 * already contains this information. Calculation of the reactions involves a cost
 * very similar to the calculation of the total residual
 * @author Vicente Mataix Ferrandiz
 * @tparam TSparseSpace The sparse matrix system considered
 * @tparam TDenseSpace The dense matrix system
 * @tparam TLinearSolver The type of linear solver considered
 * @tparam TBuilderAndSolver The builder and solver considered as base
 */
template<class TSparseSpace,
         class TDenseSpace, //= DenseSpace<double>,
         class TLinearSolver, //= LinearSolver<TSparseSpace,TDenseSpace>
         class TBuilderAndSolver = ResidualBasedBlockBuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
         >
class ContactResidualBasedBlockBuilderAndSolver
    : public TBuilderAndSolver
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ContactResidualBasedBlockBuilderAndSolver
    KRATOS_CLASS_POINTER_DEFINITION(ContactResidualBasedBlockBuilderAndSolver);

    /// Definitions dependent of the base class
    typedef TBuilderAndSolver BaseType;

    typedef typename BaseType::TSchemeType TSchemeType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /** Constructor.
     */
    ContactResidualBasedBlockBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver)
        : BaseType(pNewLinearSystemSolver)
    {
    }

    /** Destructor.
     */
    ~ContactResidualBasedBlockBuilderAndSolver() override
    {
    }

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief This method imposses the BC of Dirichlet. It will fill with 0 the corresponding DoF
     * @details Isolated contact nodes are temporarily fixed around the base-class
     * call so their Lagrange-multiplier DoFs do not pollute the system
     * @param pScheme The pointer to the scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS of the system
     * @param Dx The current solution increment
     * @param b The RHS of the system
     */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
        ) override
    {
        FixIsolatedNodes(rModelPart);

        BaseType::ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

        FreeIsolatedNodes(rModelPart);
    }

    /**
     * @brief This method buils the RHS of the system of equations
     * @details Same fix/free wrapping around the base-class build as in
     * ApplyDirichletConditions
     * @param pScheme The pointer to the scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param b The RHS of the system
     */
    void BuildRHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& b
        ) override
    {
        FixIsolatedNodes(rModelPart);

        BaseType::BuildRHS(pScheme, rModelPart, b);

        FreeIsolatedNodes(rModelPart);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    /**
     * @brief This method check the ISOLATED nodes and it fixes
     * @details A node is considered ISOLATED only if every contact condition it
     * belongs to is flagged ISOLATED; the node flags are combined under a
     * per-node lock because the condition loop runs in parallel
     * @param rModelPart The model part to compute
     */
    void FixIsolatedNodes(ModelPart& rModelPart)
    {
        KRATOS_ERROR_IF_NOT(rModelPart.HasSubModelPart("Contact")) << "CONTACT MODEL PART NOT CREATED" << std::endl;
        KRATOS_ERROR_IF_NOT(rModelPart.HasSubModelPart("ComputingContact")) << "CONTACT COMPUTING MODEL PART NOT CREATED" << std::endl;
        ModelPart& contact_model_part = rModelPart.GetSubModelPart("Contact");
        ModelPart& computing_contact_model_part = rModelPart.GetSubModelPart("ComputingContact");

        // We reset the flag
        auto& nodes_array = contact_model_part.Nodes();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
            (nodes_array.begin() + i)->Set(VISITED, false);
            (nodes_array.begin() + i)->Set(ISOLATED, false);
        }

        // Now we set the flag in the nodes
        auto& conditions_array = computing_contact_model_part.Conditions();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(conditions_array.size()); ++i) {
            auto it_cond = conditions_array.begin() + i;
            auto& geom = it_cond->GetGeometry();
            for (std::size_t i_node = 0; i_node < geom.size(); ++i_node) {
                // Lock the node: several conditions may share it across threads
                geom[i_node].SetLock();
                if (geom[i_node].Is(VISITED) == false) {
                    // First condition seen for this node: take its flag as-is
                    geom[i_node].Set(ISOLATED, it_cond->Is(ISOLATED));
                    geom[i_node].Set(VISITED, true);
                } else {
                    // AND-combine: the node stays ISOLATED only if all its conditions are
                    geom[i_node].Set(ISOLATED, geom[i_node].Is(ISOLATED) && it_cond->Is(ISOLATED));
                }
                geom[i_node].UnSetLock();
            }
        }

        // We fix the LM
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
            auto it_node = nodes_array.begin() + i;
            if (it_node->Is(ISOLATED) == true) {
                // Fix whichever Lagrange-multiplier formulation this node carries
                if (it_node->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE))
                    it_node->Fix(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE);
                else if (it_node->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X)) {
                    it_node->Fix(VECTOR_LAGRANGE_MULTIPLIER_X);
                    it_node->Fix(VECTOR_LAGRANGE_MULTIPLIER_Y);
                    it_node->Fix(VECTOR_LAGRANGE_MULTIPLIER_Z);
                }
            }
        }
    }

    /**
     * @brief This method releases the ISOLATED nodes
     * @details Inverse of FixIsolatedNodes: frees the Lagrange-multiplier DoFs
     * that were fixed before the base-class call
     * @param rModelPart The model part to compute
     */
    void FreeIsolatedNodes(ModelPart& rModelPart)
    {
        KRATOS_ERROR_IF_NOT(rModelPart.HasSubModelPart("Contact")) << "CONTACT MODEL PART NOT CREATED" << std::endl;
        ModelPart& contact_model_part = rModelPart.GetSubModelPart("Contact");

        // We release the LM
        auto& nodes_array = contact_model_part.Nodes();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
            auto it_node = nodes_array.begin() + i;
            if (it_node->Is(ISOLATED) == true) {
                if (it_node->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE))
                    it_node->Free(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE);
                else if (it_node->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X)) {
                    it_node->Free(VECTOR_LAGRANGE_MULTIPLIER_X);
                    it_node->Free(VECTOR_LAGRANGE_MULTIPLIER_Y);
                    it_node->Free(VECTOR_LAGRANGE_MULTIPLIER_Z);
                }
            }
        }
    }

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}

}; /* Class ContactResidualBasedBlockBuilderAndSolver */

///@}
///@name Type Definitions
///@{

///@}

} /* namespace Kratos.*/

#endif /* KRATOS_CONTACT_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER  defined */
parallel_block.c
#include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <time.h> #define T 1000 #define NULL 0 long Kol_block_v_stroke,Kol_block_v_stolbce; char *String1,*String2; long M,N; typedef struct { int BLOCKI; int BLOCKJ; int maxi; int maxj; struct MaxElement* next; }MaxElement,*PMaxElement; typedef struct { int max; int maxi; int maxj; struct MAXIMAL* next; }MAXIMAL,*PMAX; typedef struct { //int col[T]; //int row[T]; int* col; int* row; PMAX el_max; }BLOCK,*PBLOCK; typedef struct { char inf; struct String* next; }String,*PString; PString Cons( char x, PString L) { PString T1; T1 = ( PString )malloc( sizeof(String)); T1->inf = x; T1->next = L; return T1; } PMAX Cons_el( int x,int i, int j, PMAX L) { PMAX T1; T1 = ( PMAX )malloc( sizeof(MAXIMAL)); T1->max = x; T1->maxi = i; T1->maxj = j; T1->next = L; return T1; } int max_from_two( int x, int y) { return ((x>=y)?x:y); } int min_from_two( int x, int y) { return ((x<=y)?x:y); } long size_of_file( FILE* fileS1) { long size_file; fseek(fileS1, 0, SEEK_END); // переместить указатель в конец файла size_file = ftell(fileS1); // получить текущую позицию fseek(fileS1, 0, SEEK_SET); // вернуть указатель на начало return size_file; } int read_file( FILE* file_string, char* String) { char b; int i; i = 0; while(!feof(file_string)){ b = fgetc(file_string); if (isalpha(b)) { String[i] = toupper(b); i++; } } return i; } FILE* write_to_file( FILE* file1, PString Str1,int x, int y) { PString Str; fprintf(file1," %c",'*'); while( Str1 ){ // printf(" %c",Str1->inf); fprintf(file1,"%c",Str1->inf); Str = Str1; Str1 = Str1->next; free(Str); } fprintf(file1," { %i %i }",x,y); fprintf(file1,"%c",'\n'); //printf("\n"); return file1; } PMaxElement maxel ( BLOCK **mas, PMaxElement B,int* p ) { int i,j,maximal,maximalI,maximalJ; PMaxElement m; maximal = 0; maximalI = 0; maximalJ = 0; for ( i = 0; i < Kol_block_v_stolbce; i++ ) for ( j = 0; j < Kol_block_v_stroke; j++ ) if ( mas[i][j].el_max->max >= maximal ){ maximal = 
mas[i][j].el_max->max; maximalI = mas[i][j].el_max->maxi; maximalJ = mas[i][j].el_max->maxj; } printf(" TT %i ",maximal); for ( i = 0; i < Kol_block_v_stolbce; i++) for ( j = 0; j < Kol_block_v_stroke; j++ ) if ( mas[i][j].el_max->max == maximal ){ while ( mas[i][j].el_max ) { m = ( PMaxElement )malloc( sizeof ( MaxElement )); m -> BLOCKI = i; m -> BLOCKJ = j; m -> maxi = mas[i][j].el_max->maxi-1; m -> maxj = mas[i][j].el_max->maxj-1; m -> next = B; B = m; mas[i][j].el_max = mas[i][j].el_max->next; } } *p = maximal; return m; } int differences(char S1, char S2, int penalty, int** BLOSUM) { int difference; if(( S2 == 'N')||(S1 == 'N')) difference = penalty/2; if( S2 == 'A') { if ( S1 == 'G') difference = BLOSUM [1][0]; if ( S1 == 'C') difference = BLOSUM [2][0]; if ( S1 == 'T') difference = BLOSUM [3][0]; if ( S1 == '-') difference = BLOSUM [4][0]; } if( S2 == 'G'){ if ( S1 == 'A') difference = BLOSUM [1][0]; if ( S1 == 'C') difference = BLOSUM [2][1]; if ( S1 == 'T') difference = BLOSUM [3][1]; if ( S1 == '-') difference = BLOSUM [4][1]; } if ( S2 == 'C') { if ( S1 == 'A') difference = BLOSUM [2][0]; if ( S1 == 'G') difference = BLOSUM [2][1]; if ( S1 == 'T') difference = BLOSUM [3][2]; if ( S1 == '-') difference = BLOSUM [4][2]; } if ( S2 == 'T'){ if ( S1 == 'A') difference = BLOSUM [3][0]; if ( S1 == 'G') difference = BLOSUM [3][1]; if ( S1 == 'C') difference = BLOSUM [3][2]; if ( S1 == '-') difference = BLOSUM [4][3]; } if( S2 == '-') { if ( S1 == 'G') difference = BLOSUM [4][1]; if ( S1 == 'C') difference = BLOSUM [4][2]; if ( S1 == 'T') difference = BLOSUM [4][3]; if ( S1 == 'A') difference = BLOSUM [4][0]; } return difference; } int similarities(char S2,int** BLOSUM) { int similarity; if ( S2 == 'A') similarity = BLOSUM [0][0]; if ( S2 == 'G') similarity = BLOSUM [1][1]; if ( S2 == 'C') similarity = BLOSUM [2][2]; if ( S2 == 'T') similarity = BLOSUM [3][3]; if ( S2 == '-') similarity = BLOSUM [4][4]; return similarity; } void blocks( int *rows, int *cols,int 
diag,int M_bl, int N_bl, PBLOCK A1,int penalty, int** BLOSUM)
/* Fill one T x T DP tile (block row M_bl, block column N_bl) of the local
 * alignment matrix.  `rows`/`cols` are the boundary row/column produced by
 * the upper and left neighbour tiles, `diag` the corner value from the
 * upper-left tile.  On return A1 holds the tile's last row/column and the
 * list of its maximum-scoring cells. */
{
    int **h;
    int wp1,wp2;
    int i,j,Size_colomn,Size_row;
    int w,max,maximum;

    /* Edge tiles may be smaller than T when N or M is not a multiple of T. */
    if ( N % T != 0 && N_bl == Kol_block_v_stroke -1 ) Size_row = N % T;
    else Size_row = T;
    if ( M % T != 0 && M_bl == Kol_block_v_stolbce - 1 ) Size_colomn = M % T;
    else Size_colomn = T;

    /* (Size_colomn+1) x (Size_row+1) scoring matrix: row/col 0 are the borders. */
    h = (int**)malloc(( Size_colomn + 1 )*sizeof(int*));
    for ( i = 0; i < Size_colomn+1;i ++)
        h[i] = (int*)malloc(( Size_row + 1 )*sizeof(int));
    h[0][0] = diag;
    for ( i = 1; i <= Size_colomn; i++) h[i][0] = cols[i-1];
    for ( j = 1; j <= Size_row; j++) h[0][j] = rows[j-1];
    maximum = 0;
    A1->row = (int*)calloc(T,sizeof(int));
    A1->col = (int*)calloc(T,sizeof(int));

    for ( i = 1; i <= Size_colomn; i++)
        for ( j = 1; j <= Size_row; j++) {
            max = 0;  /* Smith-Waterman: score floored at 0 */
            /* w = substitution score for the current character pair,
             * looked up inline in the 5x5 BLOSUM table (order A,G,C,T,-);
             * 'N' on either side costs penalty/2. */
            if( String1[i-1 + M_bl*T]==String2[j-1+ N_bl*T]) {
                switch (String2[j-1+ N_bl*T]) {
                case 'A': w = BLOSUM [0][0]; break;
                case 'G': w = BLOSUM [1][1]; break;
                case 'C': w = BLOSUM [2][2]; break;
                case 'T': w = BLOSUM [3][3]; break;
                case '-': w = BLOSUM [4][4]; break;
                }
            }
            else {
                if(( String2[j-1+ N_bl*T] == 'N')||(String1[i-1 + M_bl*T] == 'N')) w = penalty/2;
                if( String2[j-1+ N_bl*T] == 'A') {
                    switch (String1[i-1 + M_bl*T]) {
                    case 'G': w = BLOSUM [1][0]; break;
                    case 'C': w = BLOSUM [2][0]; break;
                    case 'T': w = BLOSUM [3][0]; break;
                    case '-': w = BLOSUM [4][0]; break;
                    }
                }
                if( String2[j-1+ N_bl*T] == 'G'){
                    switch (String1[i-1 + M_bl*T]) {
                    case 'A': w = BLOSUM [1][0]; break;
                    case 'C': w = BLOSUM [2][1]; break;
                    case 'T': w = BLOSUM [3][1]; break;
                    case '-': w = BLOSUM [4][1]; break;
                    }
                }
                if ( String2[j-1+ N_bl*T] == 'C') {
                    switch (String1[i-1 + M_bl*T]) {
                    case 'A': w = BLOSUM [2][0]; break;
                    case 'G': w = BLOSUM [2][1]; break;
                    case 'T': w = BLOSUM [3][2]; break;
                    case '-': w = BLOSUM [4][2]; break;
                    }
                }
                if ( String2[j-1+ N_bl*T] == 'T'){
                    switch (String1[i-1 + M_bl*T]) {
                    case 'A': w = BLOSUM [3][0]; break;
                    case 'G': w = BLOSUM [3][1]; break;
                    case 'C': w = BLOSUM [3][2]; break;
                    case '-': w = BLOSUM [4][3]; break;
                    }
                }
                if( String2[j-1+ N_bl*T] == '-') {
                    switch (String1[i-1 + M_bl*T]) {
                    case 'G': w = BLOSUM [4][1]; break;
                    case 'C': w = BLOSUM [4][2]; break;
                    case 'T': w = BLOSUM [4][3]; break;
                    case 'A': w = BLOSUM [4][0]; break;
                    }
                }
            }
            /* wp1/wp2 = gap costs for skipping a character of String1/String2
             * (score of the character against '-'). */
            if (String1[i-1+ M_bl*T] != '-')
                switch (String1[i-1+ M_bl*T]) {
                case 'G': wp1 = BLOSUM [4][1]; break;
                case 'C': wp1 = BLOSUM [4][2]; break;
                case 'T': wp1 = BLOSUM [4][3]; break;
                case 'A': wp1 = BLOSUM [4][0]; break;
                }
            else wp1 = BLOSUM [4][4];
            if (String2[j-1+ N_bl*T] != '-')
                switch (String2[j-1+ N_bl*T]) {
                case 'G': wp2 = BLOSUM [4][1]; break;
                case 'C': wp2 = BLOSUM [4][2]; break;
                case 'T': wp2 = BLOSUM [4][3]; break;
                case 'A': wp2 = BLOSUM [4][0]; break;
                }
            else wp2 = BLOSUM [4][4];
            /* (An older, commented-out simple match/mismatch scoring
             * variant was removed here.) */
            /* Standard local-alignment recurrence: diagonal, left, up. */
            if (( h[i-1][ j-1] + w )> max ) max = h[i-1][ j-1] + w ;
            if ( ( h[i][ j-1] + wp2 )> max ) max = h[i][ j-1] + wp2 ;
            if ( ( h[i-1][ j] + wp1 )> max ) max = h[i-1][ j] + wp1 ;
            h[i][j] = max;
            if ( h[i][j] >= maximum ) maximum = h[i][j];
            /* Export the tile's last row/column for the neighbour tiles. */
            if ( i == T ) A1->row[j-1] = h[i][j];
            if ( j == T ) A1->col[i-1] = h[i][j];
        }

    /* Record every cell that attains this tile's maximum. */
    A1->el_max = NULL;
    for ( i = 1; i <= Size_colomn; i++)
        for ( j = 1; j <= Size_row; j++)
            if ( h[i][j] == maximum ) A1->el_max = Cons_el(h[i][j],i,j,A1->el_max);

    /* NOTE(review): only Size_colomn of the Size_colomn+1 allocated rows are
     * freed here — h[Size_colomn] leaks; verify and fix upstream. */
    for ( j=0; j<Size_colomn;j++) free (h[j]);
    free (h);
}

/* Free the boundary row/column buffers of tiles outside the rectangle
 * [0..M_bl] x [0..N_bl] within [0..i2] x [0..j2]. */
void clear (int M_bl, int N_bl,int i2, int j2, BLOCK** A1)
{
    int i,j;
    for ( i = M_bl+1; i <= i2; i++)
        for ( j = 0 ; j <= j2; j++) {
            free(A1[i][j].col);
            free(A1[i][j].row);
        }
    for ( j = N_bl+1; j <= j2; j++)
        for ( i = 0; i <= M_bl; i++) {
            free(A1[i][j].col);
            free(A1[i][j].row);
        }
}

/* Recompute one tile's DP matrix and record, for each cell, which move
 * produced it in P (0 = stop, 1 = left/gap in S1, 2 = diagonal,
 * 3 = up/gap in S2).  Used during traceback; scoring mirrors blocks()
 * but via the differences()/similarities() helpers. */
unsigned char** recollect( int *rows, int *cols,int diag,int M_bl, int N_bl,unsigned char **P,int penalty, int** BLOSUM)
{
    int **h;
    unsigned char previous;
    int wp1,wp2;
    int i,j,Size_colomn,Size_row;
    int w,max;

    if ( N % T != 0 && N_bl == Kol_block_v_stroke -1 ) Size_row = N % T;
    else Size_row = T;
    if ( M % T != 0 && M_bl == Kol_block_v_stolbce - 1 ) Size_colomn = M % T;
    else Size_colomn = T;

    h = (int**)malloc(( Size_colomn + 1 )*sizeof(int*));
    for ( i = 0; i < Size_colomn+1;i ++)
        h[i] = (int*)malloc(( Size_row + 1 )*sizeof(int));
    h[0][0] = diag;
    for ( i = 1; i <= Size_colomn; i++) h[i][0] = cols[i-1];
    for ( j = 1; j <= Size_row; j++) h[0][j] = rows[j-1];

    for ( i = 1; i <= Size_colomn; i++)
        for ( j = 1; j <= Size_row; j++) {
            max = 0;
            previous = 0;
            if( String1[i-1 + M_bl*T]==String2[j-1+ N_bl*T]) w = similarities(String2[j-1+ N_bl*T],BLOSUM);
            else w = differences(String1[i-1 + M_bl*T],String2[j-1+ N_bl*T],penalty,BLOSUM);
            if (String1[i-1+ M_bl*T] != '-') wp1 = differences(String1[i-1 + M_bl*T],'-',penalty,BLOSUM);
            else wp1 = similarities('-',BLOSUM);
            if (String2[j-1+ N_bl*T] != '-') wp2 = differences('-',String2[j-1+ N_bl*T],penalty,BLOSUM);
            else wp2 = similarities('-',BLOSUM);
            if (( h[i-1][ j-1] + w )> max ) { max = h[i-1][ j-1] + w ; previous = 2; }
            if ( ( h[i][ j-1] + wp2 )> max ){ max = h[i][ j-1] + wp2 ; previous = 1; }
            if ( ( h[i-1][ j] + wp1 )> max ) { max = h[i-1][ j] + wp1 ; previous = 3; }
            h[i][j] = max;
            P[i-1][j-1] = previous;
        }

    /* NOTE(review): same off-by-one free as in blocks() — h[Size_colomn] leaks. */
    for ( j=0; j<Size_colomn;j++) free (h[j]);
    free (h);
    return P;
}

/* Driver: read both sequences and the scoring matrix, fill the tiled DP
 * matrix wave-front-parallel with OpenMP, then trace back and write the
 * aligned strings.  (Body continues below.) */
int main()
{
    FILE * fileS1,*fileS2;
    long size_file;
    int c;
    int i,j,i1,j1,i2,j2,i3,j3;
    int **BLOSUM;
    int *row1;
    unsigned char **Prev;
    int space,dif,sim,score ;
    int *p;
    int penalty ;      // gap penalty
    int endI,endJ;     // positions in the source strings where the alignment ended
    int beginI,beginJ; // positions in the strings at which the optimal alignment begins
    int difference, similarity;
    PString Str1,Str2;
    BLOCK ** mat;
    PMaxElement L;
    time_t time1,time2;
    time1 = clock();
fileS1=fopen("string1.txt","rb");
    size_file = size_of_file(fileS1);
    String1 = (char*)malloc( size_file * sizeof(char));
    M = read_file(fileS1,String1);
    fclose(fileS1);
    fileS2=fopen("string2.txt","rb");
    size_file = size_of_file(fileS2);
    String2 = (char*)malloc( size_file * sizeof(char));
    N = read_file(fileS2,String2);
    fclose(fileS2);
    /* NOTE(review): M and N are long but printed with %i — should be %li. */
    printf("\n Length of First string is %i\n",M);
    printf("\n Length of Second string is %i\n",N);

    // Allocate the 5x5 BLOSUM scoring matrix.
    BLOSUM = (int**)malloc( 5 * sizeof(int*));
    for ( i = 0; i < 5;i ++) BLOSUM[i] = (int*)malloc( 5 * sizeof(int));

    // Read the matrix from BLOSUM.txt (a space is expected after every
    // number in the matrix); the leading value is the gap penalty.
    fileS1 = fopen("BLOSUM.txt","r");
    fscanf(fileS1," %i",&penalty);
    for(i = 0; i < 5; i++)
        for(j = 0; j < 5; j++){
            fscanf(fileS1," %i",&c);
            while((c!=' ')&&(!feof(fileS1))){
                BLOSUM[i][j] = c;
                c = fgetc(fileS1);
            }
        }
    fclose(fileS1);

    // Number of tiles per row/column, rounding up for partial tiles.
    if ( N%T == 0) Kol_block_v_stroke = N/T;
    else Kol_block_v_stroke = N/T + 1;
    if ( M%T == 0) Kol_block_v_stolbce = M/T;
    else Kol_block_v_stolbce = M/T + 1;

    mat = ( BLOCK ** )malloc( Kol_block_v_stolbce * sizeof(BLOCK *)); //M_block
    for ( i = 0; i < Kol_block_v_stolbce ;i ++)
        mat[i] = ( BLOCK *)malloc( Kol_block_v_stroke * sizeof(BLOCK)); //N_block

    // Zero border used by the first tile row/column.
    row1 = (int*)malloc( T * sizeof(int));
    for ( i = 0; i < T; i++){
        row1[i] = 0;
    }

    // Wave-front sweep over anti-diagonals j; tiles on the same
    // anti-diagonal are independent and are computed in parallel.
    for (j = 0; j <= Kol_block_v_stolbce + Kol_block_v_stroke - 2; j = j + 1) {
#pragma omp parallel for
        for (i = max_from_two(0, j - Kol_block_v_stroke + 1); i <= min_from_two(Kol_block_v_stolbce-1, j); i = i + 1)
            if ( i == 0)
                if ( j-i == 0) blocks(row1,row1,0,i,j-i,&mat[i][j-i],penalty,BLOSUM);
                else blocks(row1,mat[i][j-i-1].col,0,i,j-i,&mat[i][j-i],penalty,BLOSUM);
            else
                if ( j-i == 0) blocks(mat[i-1][j-i].row,row1,0,i,j-i,&mat[i][j-i],penalty,BLOSUM);
                else blocks(mat[i-1][j-i].row,mat[i][j-i-1].col,mat[i-1][j-i-1].col[T-1],i,j-i,&mat[i][j-i],penalty,BLOSUM);
    }

    /* (A large commented-out older variant of the anti-diagonal scheduling,
     * split into separate triangular/rectangular phases, was removed here.) */

    // Collect all global-maximum cells and the score.
    L = NULL;
    p = &score;
    L = maxel(mat,L,p);
    printf("SCORE %i ",score);
    printf("Alignment\n");
    fileS1 = fopen("newS1.txt","w");
    fileS2 = fopen("newS2.txt","w");

    // Per-tile traceback direction matrix, refilled by recollect().
    Prev = (unsigned char**)calloc(T,sizeof(unsigned char*));
    for ( i = 0; i < T;i ++) Prev[i] = (unsigned char*)calloc(T,sizeof(unsigned char));

    // Trace back one alignment per recorded maximum.
    while( L ){
        space = 0;
        dif = 0;
        sim = 0;
        if ( L ){
            Str1 = NULL;
            Str2 = NULL;
            // Absolute coordinates of the maximum cell.
            i = L->maxi + T * L->BLOCKI;
            j = L->maxj + T * L->BLOCKJ;
            printf(" %i %i ",i,j);
            i1 = L->BLOCKI;
            j1 = L->BLOCKJ;
            // Recompute the direction matrix for the tile that holds the maximum.
            if ( L->BLOCKI == 0)
                if ( L->BLOCKJ == 0) {
                    Prev = recollect(row1,row1,0,i1,j1,Prev,penalty,BLOSUM);
                } else {
                    Prev = recollect(row1,mat[i1][j1-1].col,0,i1,j1,Prev,penalty,BLOSUM);
                }
            else
                if ( L->BLOCKJ == 0) {
                    Prev = recollect(mat[i1-1][j1].row,row1,0,i1,j1,Prev,penalty,BLOSUM);
                } else {
                    Prev = recollect(mat[i1-1][j1].row,mat[i1][j1-1].col,mat[i1-1][j1-1].col[T-1],i1,j1,Prev,penalty,BLOSUM);
                }
            endI = i+1;
            endJ = j+1;
            i2 = L->maxi;   // tile-local coordinates during traceback
            j2 = L->maxj;
            printf("Alignment\n");
            // Walk back until a 0 (stop) cell; codes: 3 = up (gap in Str1),
            // 2 = diagonal (match/mismatch), 1 = left (gap in Str2).
            while( Prev[i2][j2] != 0 && i2 != -1 && j2 != -1 && i>=0 && j>=0){
                beginI = i+1;
                beginJ = j+1;
                if(i2>=0 && j2>=0 && Prev[i2][j2] == 3){
                    Str1 = Cons('-',Str1);
                    Str2 = Cons(String1[i],Str2);
                    space++;
                    i = i - 1;
                    i2 = i2 - 1;
                }
                if(i2>=0 && j2>=0 && Prev[i2][j2] == 2 ){
                    Str1 = Cons(String2[j],Str1);
                    Str2 = Cons(String1[i],Str2);
                    i = i - 1;
                    j = j - 1;
                    i2 = i2 - 1;
                    j2 = j2 - 1;
                    if (Str1->inf == Str2->inf) sim++;
                    else dif++;
                }
                if (i2>=0 && j2>=0 && Prev[i2][j2] == 1) {
                    Str2 = Cons('-',Str2);
                    Str1 = Cons(String2[j],Str1);
                    space++;
                    j = j - 1;
                    j2 = j2 -1;
                }
                // Crossed a tile border: recompute Prev for the tile that
                // now contains (i, j).
                if (i2 == -1 || j2 == -1){
                    i3 = i1;
                    j3 = j1;
                    i1 = i/T;
                    j1 = j/T;
                    if ( i>=0 && j>=0 ){
                        if ( i1 == 0)
                            if ( j1 == 0) {
                                Prev = recollect(row1,row1,0,i1,j1,Prev,penalty,BLOSUM);
                            } else {
                                Prev = recollect(row1,mat[i1][j1-1].col,0,i1,j1,Prev,penalty,BLOSUM);
                            }
                        else
                            if ( j1 == 0) {
                                Prev = recollect(mat[i1-1][j1].row,row1,0,i1,j1,Prev,penalty,BLOSUM);
                            } else {
                                Prev = recollect(mat[i1-1][j1].row,mat[i1][j1-1].col,mat[i1-1][j1-1].col[T-1],i1,j1,Prev,penalty,BLOSUM);
                            }
                    }
                }
                if ( i2 == -1 ) i2 = T-1;
                if ( j2 == -1 ) j2 = T-1;
            }
            // Write the aligned string S1 to newS1.txt; each alignment goes
            // on its own line prefixed with '*'.
            printf("\n First string : \n");
            fileS1 = write_to_file(fileS1, Str2,beginI,endI);
            // Write the aligned string S2 to newS2.txt the same way.
            printf("\n Second string : \n");
            fileS2 = write_to_file(fileS2,Str1,beginJ,endJ);
            printf("\n Score = %i ",score);
            printf(" \n Simularities = %i", sim);
            printf("\n Differences = %i ",dif);
            printf(" \n Spaces = %i \n", space);
            printf(" Position of alignment at First string %i %i",beginI,endI);
            printf(" \n Position of alignment at Second string %i %i \n",beginJ,endJ);
            L = L ->next;
        }
    }
    time2 = clock();
    printf("\n Time = %i ", ( time2-time1 ));
    fclose(fileS1);
    fclose(fileS2);
    /* NOTE(review): BLOSUM has 5 rows but only 4 are freed here — verify. */
    for ( i=0; i< 4;i++) free (BLOSUM[i]);
    free (BLOSUM);
    for ( j=0; j<T;j++) free (Prev[j]);
    free (Prev);
    for ( j=0; j< Kol_block_v_stolbce;j++) free (mat[j]);
    free (mat);
    system("pause");
    return 0;
}
// repeat_base.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2015, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
//
// ==========================================================================
// Author: David Weese <david.weese@fu-berlin.de>
// ==========================================================================

#ifndef SEQAN_HEADER_REPEAT_BASE_H
#define SEQAN_HEADER_REPEAT_BASE_H

#if SEQAN_ENABLE_PARALLELISM
#include <seqan/parallel.h>
#endif  // #if SEQAN_ENABLE_PARALLELISM

namespace seqan {

/*!
 * @class Repeat
 * @headerfile <seqan/index.h>
 * @brief Store information about a repeat.
 *
 * @signature template <typename TPos, typename TPeriod>
 *            struct Repeat;
 *
 * @tparam TPeriod Type to use for storing the repeat period. Default: 1
 * @tparam TPos Type to use for storing positions.
 *
 * @see findRepeats
 *
 * @var TPos Repeat::endPosition;
 * @brief The end position of the repeat of type <tt>TPos</tt>.
 *
 * @var TPos Repeat::beginPosition;
 * @brief The begin position of the repeat of type <tt>TPos</tt>.
 *
 * @var TPeriod Repeat::period;
 * @brief The period of the repeat of type <tt>TPeriod</tt>.
 */

template <typename TPos, typename TPeriod>
struct Repeat {
    TPos beginPosition;
    TPos endPosition;
    TPeriod period;
};

// Metafunction: the value type of a Repeat is its position type.
template <typename TPos, typename TPeriod>
struct Value< Repeat<TPos, TPeriod> > {
    typedef TPos Type;
};

// Metafunction: the size type of a Repeat is its period type.
template <typename TPos, typename TPeriod>
struct Size< Repeat<TPos, TPeriod> > {
    typedef TPeriod Type;
};

// Search parameters stored as the cargo of the repeat-finder index.
template <typename TSize>
struct RepeatFinderParams {
    TSize minRepeatLen;
    TSize maxPeriod;
};

// custom TSpec for our customized wotd-Index
struct TRepeatFinder;

// The cargo of the repeat-finder wotd-Index is a RepeatFinderParams record.
template <typename TText>
struct Cargo<Index<TText, IndexWotd<TRepeatFinder> > > {
    typedef Index<TText, IndexWotd<TRepeatFinder> > TIndex;
    typedef typename Size<TIndex>::Type TSize;
    typedef RepeatFinderParams<TSize> Type;
};

// node predicate
// A suffix-tree node is interesting when occurrences * representative length
// reaches the minimum repeat length.
template <typename TText, typename TSpec>
bool nodePredicate(Iter<Index<TText, IndexWotd<TRepeatFinder> >, TSpec> &it)
{
//	return countOccurrences(it) * nodeDepth(it) >= cargo(container(it)).minRepeatLen;
    return countOccurrences(it) * repLength(it) >= cargo(container(it)).minRepeatLen;
}

// monotonic hull
// Descend only while the representative length is within the maximal period.
template <typename TText, typename TSpec>
bool nodeHullPredicate(Iter<Index<TText, IndexWotd<TRepeatFinder> >, TSpec> &it)
{
//	return nodeDepth(it) <= cargo(container(it)).maxPeriod;
    return repLength(it) <= cargo(container(it)).maxPeriod;
}

// Position comparator delegating to posLess.
// NOTE(review): std::binary_function is deprecated in C++11 and removed in
// C++17 — this base class is unused functionality-wise; confirm before a
// standard bump.
template <typename TPos>
struct RepeatLess_ : public std::binary_function<TPos, TPos, bool>
{
    // key less
    inline bool operator() (TPos const &a, TPos const &b) const {
        return posLess(a, b);
    }
};

// Whether a value counts as a "mask" character (always reported as repeat).
// Generic fallback: nothing is masked.
template <typename TValue>
inline bool _repeatMaskValue(TValue const &)
{
    // TODO(holtgrew): Maybe use unknownValue<TValue>() instead of specializing for all alphabets, especially since we have Rna5 now and might want Rna5Q later.
    return false;
}

template <>
inline bool _repeatMaskValue(Dna5 const &val)
{
    return val == unknownValue<Dna5>(); // 'N'
}

template <>
inline bool _repeatMaskValue(Dna5Q const &val)
{
    return val == unknownValue<Dna5Q>(); // 'N'
}

template <>
inline bool _repeatMaskValue(Iupac const &val)
{
    return val == unknownValue<Iupac>(); // 'N'
}

/*
template <>
inline bool _repeatMaskValue(AminoAcid val)
{
    return val == 'X';
}
*/

/*!
 * @fn findRepeats
 * @headerfile <seqan/index.h>
 * @brief Search for repeats in a text.
 *
 * @signature void findRepeats(repeatString, text, minRepeatLength[, maxPeriod]);
 *
 * @param[out] repeatString A @link String @endlink of @link Repeat @endlink objects.
 * @param[in] text The text to search repeats in. Types: @link ContainerConcept @endlink
 * @param[in] minRepeatLength The minimum length each reported repeat must have.
 * @param[in] maxPeriod Optionally, the maximal period that reported repeats can have. Default: 1
 *
 * Subsequences of undefined values/<tt>N</tt>s will always be reported.
 *
 * @section Examples
 *
 * The following demonstrates finding repeats of period 3.
 *
 * @include demos/index/find_repeats.cpp
 *
 * @code{.console}
 * # of repeats: 15
 * i == 0, beginPosition = 3, endPosition = 7, period = 1
 * i == 1, beginPosition = 46, endPosition = 53, period = 1
 * i == 2, beginPosition = 101, endPosition = 105, period = 1
 * i == 3, beginPosition = 105, endPosition = 109, period = 1
 * i == 4, beginPosition = 164, endPosition = 169, period = 1
 * i == 5, beginPosition = 291, endPosition = 297, period = 1
 * i == 6, beginPosition = 319, endPosition = 327, period = 1
 * i == 7, beginPosition = 400, endPosition = 404, period = 1
 * i == 8, beginPosition = 442, endPosition = 446, period = 1
 * i == 9, beginPosition = 468, endPosition = 473, period = 1
 * i == 10, beginPosition = 476, endPosition = 480, period = 1
 * i == 11, beginPosition = 507, endPosition = 513, period = 1
 * i == 12, beginPosition = 561, endPosition = 566, period = 1
 * i == 13, beginPosition = 623, endPosition = 627, period = 1
 * i == 14, beginPosition = 655, endPosition = 659, period = 1
 * @endcode
 *
 * @see AlphabetWithUnknownValueConcept#unknownValue
 * @see Repeat
 */

// TODO(holtgrew): minRepeatLength is 1-off.

// period-1 optimization
// Finds runs of identical characters (period-1 repeats).  For long texts and
// SEQAN_ENABLE_PARALLELISM, the text is chunked across OpenMP threads, each
// thread collects repeats locally, and the per-thread results are then merged
// ("mended") at the chunk borders.
template <typename TRepeatStore, typename TString, typename TRepeatSize>
inline void findRepeats(TRepeatStore &repString, TString const &text, TRepeatSize minRepeatLen)
{
    typedef typename Value<TRepeatStore>::Type TRepeat;
    typedef typename Iterator<TString const>::Type TIterator;
    typedef typename Size<TString>::Type TSize;

#if SEQAN_ENABLE_PARALLELISM
    typedef typename Value<TString>::Type TValue;

    if (length(text) > (TSize)(omp_get_max_threads() * 2 * minRepeatLen)) {
        // Parallel case.

        // NOTE(holtgrew): The minimum text length check above makes it impossible that more than two chunks are
        // required to form an otherwise too short repeat.
        // TODO(holtgrew): Load balancing? Probably not worth it.
        String<TSize> splitters;
        String<TRepeatStore> threadLocalStores;

        // Each threads finds repeats on its chunk in parallel.
        #pragma omp parallel
        {
            // We have to determine the number of available threads at this point. We will use the number of thread
            // local stores to determin the number of available threads later on.
            #pragma omp master
            {
                computeSplitters(splitters, length(text), omp_get_num_threads());
                resize(threadLocalStores, omp_get_num_threads());
            }  // end of #pragma omp master
            #pragma omp barrier

            int const t = omp_get_thread_num();
            TRepeatStore & store = threadLocalStores[t];

            TRepeat rep;
            rep.beginPosition = 0;
            rep.endPosition = 0;
            rep.period = 1;

            // Flags used for force-adding repeats for the chunks that have a left/right neighbour.
            bool forceFirst = t > 0;
            bool forceLast = (t + 1) < omp_get_num_threads();

            TIterator it = iter(text, splitters[t], Standard());
            TIterator itEnd = iter(text, splitters[t + 1], Standard());
            if (it != itEnd)
            {
                TValue last = *it;
                TSize repLeft = 0;       // chunk-relative begin of current run
                TSize repRight = 1;      // chunk-relative end (exclusive)

                for (++it; it != itEnd; ++it, ++repRight)
                {
                    if (*it != last)
                    {
                        // Run ended; report it if masked, long enough, or
                        // adjacent to the left chunk border (forceFirst).
                        if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen || forceFirst)
                        {
                            forceFirst = false;
                            // insert repeat
                            rep.beginPosition = splitters[t] + repLeft;
                            rep.endPosition = splitters[t] + repRight;
                            appendValue(store, rep);
                        }
                        repLeft = repRight;
                        last = *it;
                    }
                }
                if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen || forceLast)
                {
                    // Insert repeat but only if it is not already in there.
                    // NOTE(review): repLeft/repRight here are chunk-relative
                    // while back(store).beginPosition is absolute — for t > 0
                    // the comparison presumably always differs; verify intent.
                    if (empty(store) || (back(store).beginPosition != repLeft && back(store).endPosition != repRight))
                    {
                        rep.beginPosition = splitters[t] + repLeft;
                        rep.endPosition = splitters[t] + repRight;
                        appendValue(store, rep);
                    }
                }
            }
        }  // end of #pragma omp parallel

        // (debug dump of the thread-local stores before mending removed)

        // Mend the splice points.
        //
        // We will copy out infixes described by fromPositions.
        String<Pair<TSize> > fromPositions;
        resize(fromPositions, length(threadLocalStores));
        for (unsigned i = 0; i < length(fromPositions); ++i)
        {
            fromPositions[i].i1 = 0;
            fromPositions[i].i2 = length(threadLocalStores[i]);
        }

        // First, merge repeats spanning blocks. Do this iteratively until all has been merged.
        bool anyChange;
        do
        {
            anyChange = false;
            int lastNonEmpty = -1;
            for (unsigned i = 0; i < length(threadLocalStores); ++i)
            {
                if (fromPositions[i].i1 == fromPositions[i].i2)
                    continue;  // Skip empty buckets.

                if (lastNonEmpty != -1)
                {
                    bool const adjacent = back(threadLocalStores[lastNonEmpty]).endPosition == front(threadLocalStores[i]).beginPosition;
                    bool const charsEqual = text[back(threadLocalStores[lastNonEmpty]).beginPosition] == text[front(threadLocalStores[i]).beginPosition];
                    if (adjacent && charsEqual)
                    {
                        anyChange = true;
                        back(threadLocalStores[lastNonEmpty]).endPosition = front(threadLocalStores[i]).endPosition;
                        fromPositions[i].i1 += 1;
                    }
                }

                if (fromPositions[i].i1 != fromPositions[i].i2)
                    lastNonEmpty = i;
            }
        }
        while (anyChange);

        // Then, remove any repeats in the beginning and end of blocks that are too short.
        for (unsigned i = 0; i < length(threadLocalStores); ++i)
        {
            if (fromPositions[i].i1 == fromPositions[i].i2)
                continue;
            unsigned j = fromPositions[i].i1;
            TRepeatSize len = threadLocalStores[i][j].endPosition - threadLocalStores[i][j].beginPosition;
            if (!_repeatMaskValue(text[threadLocalStores[i][j].beginPosition]) &&  // Never remove mask value.
                len <= minRepeatLen)
                fromPositions[i].i1 += 1;
            if (fromPositions[i].i1 == fromPositions[i].i2)
                continue;
            j = fromPositions[i].i2 - 1;
            len = threadLocalStores[i][j].endPosition - threadLocalStores[i][j].beginPosition;
            if (!_repeatMaskValue(text[threadLocalStores[i][j].beginPosition]) &&  // Never remove mask value.
                len <= minRepeatLen)
                fromPositions[i].i2 -= 1;
        }

        // Last, build splitters for output in parallel.
        String<unsigned> outSplitters;
        appendValue(outSplitters, 0);
        for (unsigned i = 0; i < length(threadLocalStores); ++i)
            appendValue(outSplitters, back(outSplitters) + fromPositions[i].i2 - fromPositions[i].i1);

        // (debug dump of the thread-local stores after mending removed)

        // Allocate memory.
        clear(repString);
        resize(repString, back(outSplitters));

        // Copy back the repeats in parallel.
        unsigned nt = length(threadLocalStores);
        (void) nt;  // Otherwise, GCC 4.6 warns, does not see it used in pragma clause below.
        #pragma omp parallel num_threads(nt)
        {
            int const t = omp_get_thread_num();
            arrayCopy(iter(threadLocalStores[t], fromPositions[t].i1, Standard()),
                      iter(threadLocalStores[t], fromPositions[t].i2, Standard()),
                      iter(repString, outSplitters[t], Standard()));
        }  // end of #pragma omp parallel
    }
    else
    {
#endif  // #if SEQAN_ENABLE_PARALLELISM
        // Sequential case.
        TRepeat rep;
        rep.period = 1;
        clear(repString);

        TIterator it = begin(text, Standard());
        TIterator itEnd = end(text, Standard());
        if (it == itEnd)
            return;

        TSize repLen = 1;
        for (++it; it != itEnd; ++it)
        {
            if (*it != *(it-1))
            {
                if (_repeatMaskValue(*(it-1)) || repLen > (TSize)minRepeatLen)
                {
                    // insert repeat
                    rep.endPosition = it - begin(text, Standard());
                    rep.beginPosition = rep.endPosition - repLen;
                    appendValue(repString, rep);
                }
                repLen = 1;
            }
            else
                ++repLen;
        }
        // Report the trailing run, if it qualifies.
        if (_repeatMaskValue(*(it-1)) || repLen > (TSize)minRepeatLen)
        {
            // insert repeat
            rep.endPosition = length(text);
            rep.beginPosition = rep.endPosition - repLen;
            appendValue(repString, rep);
        }
#if SEQAN_ENABLE_PARALLELISM
    }
#endif  // #if SEQAN_ENABLE_PARALLELISM
    // (debug dump of the final repeat list removed)
}

// TODO(holtgrew): Why for TString const and StringSet<> const?
// Find tandem repeats with period 1 in each sequence of a StringSet.
// A repeat is a maximal run of identical values; it is recorded when the run is
// longer than minRepeatLen, or when its value satisfies _repeatMaskValue()
// (presumably the 'N'/unknown alphabet mask -- TODO(review): confirm).
// Results are appended to repString; begin/endPosition are (seqNo, seqPos) pairs.
template <typename TRepeatStore, typename TString, typename TSpec, typename TRepeatSize>
inline void findRepeats(TRepeatStore &repString, StringSet<TString, TSpec> const &text, TRepeatSize minRepeatLen)
{
    typedef typename Value<TRepeatStore>::Type TRepeat;
    typedef typename Iterator<TString>::Type TIterator;
    typedef typename Value<TString>::Type TValue;
    typedef typename Size<TString>::Type TSize;

    TRepeat rep;
    rep.period = 1;  // this overload only detects period-1 runs
    clear(repString);
    for (unsigned i = 0; i < length(text); ++i)
    {
        TIterator it = begin(text[i], Standard());
        TIterator itEnd = end(text[i], Standard());
        if (it == itEnd) continue;  // empty sequence: nothing to scan

        // last == value of the current run; [repLeft, repRight) == run span.
        TValue last = *it;
        TSize repLeft = 0;
        TSize repRight = 1;
        rep.beginPosition.i1 = i;  // sequence number is fixed for this pass
        rep.endPosition.i1 = i;

        for (++it; it != itEnd; ++it, ++repRight)
        {
            if (last != *it)
            {
                // Run ended at repRight; report it if masked or long enough.
                if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen)
                {
                    // insert repeat
                    rep.beginPosition.i2 = repLeft;
                    rep.endPosition.i2 = repRight;
//                  std::cerr<<"left:"<<rep.beginPosition<<"  right:"<<rep.endPosition<<"  length:"<<posSub(rep.endPosition,rep.beginPosition)<<"  period:"<<rep.period<<std::endl;
                    appendValue(repString, rep);
                }
                repLeft = repRight;  // start a new run at the current position
                last = *it;
            }
        }
        // Flush the final run of the sequence (loop above only reports on a change).
        if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen)
        {
            // insert repeat
            rep.beginPosition.i2 = repLeft;
            rep.endPosition.i2 = repRight;
//          std::cerr<<"left:"<<rep.beginPosition<<"  right:"<<rep.endPosition<<"  length:"<<posSub(rep.endPosition,rep.beginPosition)<<"  period:"<<rep.period<<std::endl;
            appendValue(repString, rep);
        }
    }
}

// main function
// Find repeats with period up to maxPeriod using a wotd suffix-tree index
// (IndexWotd<TRepeatFinder>).  For each inner node, consecutive occurrences of
// the node's representative that lie a constant distance apart form a periodic
// repeat; qualifying repeats are collected in a position-ordered map and then
// copied to repString.  Periods of 1 are delegated to the linear-scan overload.
template <typename TRepeatStore, typename TText, typename TRepeatSize, typename TPeriodSize>
void findRepeats(TRepeatStore &repString, TText const &text, TRepeatSize minRepeatLen, TPeriodSize maxPeriod)
{
    typedef Index<TText, IndexWotd<TRepeatFinder> > TIndex;
    typedef typename Size<TIndex>::Type TSize;
    typedef typename Iterator<TIndex, TopDown<ParentLinks<> > >::Type TNodeIterator;
    typedef typename Fibre<TIndex, FibreSA>::Type const TSA;
    typedef typename Infix<TSA>::Type TOccString;
    typedef typename Iterator<TOccString>::Type TOccIterator;
    typedef typename Value<TRepeatStore>::Type TRepeat;
    typedef typename Value<TOccString>::Type TOcc;
    // Ordered by begin position so the final copy yields sorted repeats.
    typedef std::map<TOcc,TRepeat,RepeatLess_<TOcc> > TRepeatList;

    if (maxPeriod < 1) return;
    if (maxPeriod == 1)
    {
        // Period-1 repeats are found faster by a direct scan; no index needed.
        findRepeats(repString, text, minRepeatLen);
        return;
    }

    TIndex index(text);
    TRepeatList list;

    // set repeat finder parameters (read by the TRepeatFinder traversal cargo)
    cargo(index).minRepeatLen = minRepeatLen;
    cargo(index).maxPeriod = maxPeriod;

    TNodeIterator nodeIt(index);
    TOccIterator itA, itB, itRepBegin, itEnd;
    TRepeat rep;
    for (; !atEnd(nodeIt); goNext(nodeIt))
    {
        if (isRoot(nodeIt)) continue;
        // get occurrences
        TOccString occ = getOccurrences(nodeIt);
        itA = begin(occ, Standard());
        itEnd = end(occ, Standard());
        itRepBegin = itB = itA;

        TSize repLen = repLength(nodeIt);  // representative length
        // Representatives at least minRepeatLen long are skipped; this also
        // guarantees minLen below does not underflow (repLen < minRepeatLen).
        if ((TSize)minRepeatLen <= repLen) continue;

        TSize diff, period = 0;  // period of current repeat
        TSize repeatLen = 0;     // overall length of current repeat
        TSize minLen = minRepeatLen - repLen;  // minimum repeat length minus length of representative

        // Scan occurrence pairs (itA, itB); a constant gap "period" between
        // consecutive occurrences in the same sequence extends the current
        // repeat, any change of gap or sequence closes it.
        for (++itB; itB != itEnd; ++itB)
        {
            diff = posSub(*itB, *itA);
            if (diff != period || getSeqNo(*itA) != getSeqNo(*itB))
            {
                // is the repeat long enough?
                if (repeatLen >= minLen)
                    // is the repeat self overlapping or connected?
                    if (parentRepLength(nodeIt) < period && period <= repLen)
                    {
                        // insert repeat
                        rep.beginPosition = *itRepBegin;
                        rep.endPosition = posAdd(*itA, period);
                        rep.period = period;
//                      std::cerr<<"left:"<<rep.beginPosition<<"  right:"<<rep.endPosition<<"  length:"<<posSub(rep.endPosition,rep.beginPosition)<<"  period:"<<rep.period<<std::endl;
                        list.insert(std::pair<TOcc,TRepeat>(rep.beginPosition, rep));
                    }
                itRepBegin = itA;
                period = diff;
                repeatLen = 0;
            }
            repeatLen += period;
            itA = itB;
        }

        // is the last repeat long enough?
        if (repeatLen >= minLen)
            // is the repeat self overlapping or connected?
            if (parentRepLength(nodeIt) < period && period <= repLen)
            {
                // insert repeat
                rep.beginPosition = *itRepBegin;
                rep.endPosition = posAdd(*itA, period);
                rep.period = period;
//              std::cerr<<"left:"<<rep.beginPosition<<"  right:"<<rep.endPosition<<"  length:"<<posSub(rep.endPosition,rep.beginPosition)<<"  period:"<<rep.period<<std::endl;
                list.insert(std::pair<TOcc,TRepeat>(rep.beginPosition, rep));
            }
    }

    // copy low-complex regions to result string (map order == position order)
    clear(repString);
    reserve(repString, list.size(), Exact());
    typename TRepeatList::const_iterator lit = list.begin();
    typename TRepeatList::const_iterator litEnd = list.end();
    for (TSize i = 0; lit != litEnd; ++lit, ++i)
        appendValue(repString, (*lit).second);
}

}  // namespace seqan

#endif
GB_binop__ne_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): changes belong in the generator template, not here; this file
// is one instantiation (NE operator, uint16_t inputs) of a shared template.
// The function bodies live in the #include'd template files and are driven by
// the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__ne_uint16
// A.*B function (eWiseMult):       GB_AemultB__ne_uint16
// A*D function (colscale):         GB_AxD__ne_uint16
// D*A function (rowscale):         GB_DxB__ne_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__ne_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__ne_uint16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__ne_uint16
// C=scalar+B                       GB_bind1st__ne_uint16
// C=scalar+B'                      GB_bind1st_tran__ne_uint16
// C=A+scalar                       GB_bind2nd__ne_uint16
// C=A'+scalar                      GB_bind2nd_tran__ne_uint16

// C type:   bool
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" is the generator's placeholder: NE has no CBLAS equivalent).
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_UINT16 || GxB_NO_NE_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for NE: its output type (bool) differs from the inputs, so the
// in-place accumulating ewise3 kernel does not apply.  Kept under #if 0 by the
// generator.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__ne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Body disabled (#if 0) for NE: C's type (bool) differs from B's, so the
// dense-accumulate template does not apply; the stub still returns success.
GrB_Info GB_Cdense_accumB__ne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Body disabled (#if 0) for NE, as above.
GrB_Info GB_Cdense_accumb__ne_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__ne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__ne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__ne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ne_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ne_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint16_t aij = Ax [pA] ;        \
    Cx [pC] = (x != aij) ;          \
}

GrB_Info GB_bind1st_tran__ne_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (The #undef/#define pair keeps GB_ATYPE correct on both paths.)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint16_t aij = Ax [pA] ;        \
    Cx [pC] = (aij != y) ;          \
}

GrB_Info GB_bind2nd_tran__ne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__isge_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): changes belong in the generator template, not here; this file
// is one instantiation (ISGE operator, uint8_t) of a shared template.  The
// function bodies live in the #include'd template files driven by the macros
// below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isge_uint8)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__isge_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_03__isge_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isge_uint8)
// A*D function (colscale):         GB (_AxD__isge_uint8)
// D*A function (rowscale):         GB (_DxB__isge_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__isge_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__isge_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isge_uint8)
// C=scalar+B                       GB (_bind1st__isge_uint8)
// C=scalar+B'                      GB (_bind1st_tran__isge_uint8)
// C=A+scalar                       GB (_bind2nd__isge_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__isge_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (i and j are unused by ISGE; present for positional ops)
#define GB_BINOP(z, x, y, i, j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_UINT8 || GxB_NO_ISGE_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for ISGE (kept under #if 0 by the generator).
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached: the block above always returns (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are declared here and freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isge_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isge_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; GBB() treats NULL as all-present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isge_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; GBB() treats NULL as all-present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint8_t aij = Ax [pA] ;         \
    Cx [pC] = (x >= aij) ;          \
}

GrB_Info GB (_bind1st_tran__isge_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (The #undef/#define pair keeps GB_ATYPE correct on both paths.)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint8_t aij = Ax [pA] ;         \
    Cx [pC] = (aij >= y) ;          \
}

GrB_Info GB (_bind2nd_tran__isge_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__first_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__first_fp64 // A.*B function (eWiseMult): GB_AemultB__first_fp64 // A*D function (colscale): GB_AxD__first_fp64 // D*A function (rowscale): GB_DxB__first_fp64 // C+=B function (dense accum): GB_Cdense_accumB__first_fp64 // C+=b function (dense accum): GB_Cdense_accumb__first_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_fp64 // C=scalar+B GB_bind1st__first_fp64 // C=scalar+B' GB_bind1st_tran__first_fp64 // C=A+scalar (none) // C=A'+scalar (none) // C type: double // A type: double // B,b type: double // BinaryOp: cij = aij #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define 
GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
// NOTE(review): this file is generated for the FIRST operator, z = f(x,y) = x,
// so y is deliberately ignored everywhere below.  Edit the code-generation
// template, not this file.
#define GB_BINOP(z, x, y) \
    z = x ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_FP64 || GxB_NO_FIRST_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// (not generated for FIRST: the accumulating ewise3 kernel only exists for
// MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV)
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__first_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__first_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // (template disabled for FIRST; the function is a no-op that reports success)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__first_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // (template disabled for FIRST; the function is a no-op that reports success)
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__first_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__first_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__first_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__first_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__first_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // FIRST(x,Bx[p]) == x: the generated "get B" and "cast" steps are
        // empty statements here, which is why the loop body is just Cx[p] = x.
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// (not generated: FIRST bound to its 2nd argument is the identity on Ax)
#if 0

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = x ; \
}

GrB_Info GB_bind1st_tran__first_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code generated after this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// (not generated: FIRST bound to its 2nd argument is the identity on A')
#if 0

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = aij ; \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
fclaw2d_domain.c
/* Copyright (c) 2012 Carsten Burstedde, Donna Calhoun All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <fclaw2d_domain.h> #include <fclaw2d_convenience.h> /* Contains domain_destroy and others */ #include <fclaw2d_patch.h> #include <fclaw2d_exchange.h> #include <fclaw2d_global.h> void fclaw2d_domain_data_new(fclaw2d_domain_t *domain) { fclaw2d_domain_data_t* ddata = (fclaw2d_domain_data_t*) domain->user; ddata = FCLAW2D_ALLOC_ZERO(fclaw2d_domain_data_t, 1); domain->user = ddata; ddata->count_set_patch = ddata->count_delete_patch = 0; ddata->domain_exchange = NULL; ddata->domain_indirect = NULL; } void fclaw2d_domain_data_delete(fclaw2d_domain_t* domain) { fclaw2d_domain_data_t* ddata = (fclaw2d_domain_data_t*) domain->user; FCLAW2D_FREE (ddata); domain->user = NULL; } fclaw2d_domain_data_t *fclaw2d_domain_get_data(fclaw2d_domain_t *domain) { return (fclaw2d_domain_data_t *) domain->user; } void fclaw2d_domain_setup(fclaw2d_global_t* glob, fclaw2d_domain_t* new_domain) { fclaw2d_domain_t *old_domain = glob->domain; double t; if (old_domain == new_domain) { fclaw_global_infof("Building initial domain\n"); t = 0; glob->curr_time = t;//new_domain } else { fclaw_global_infof("Rebuilding domain\n"); fclaw2d_domain_data_new(new_domain); } fclaw_global_infof("Done\n"); } void fclaw2d_domain_reset(fclaw2d_global_t* glob) { fclaw2d_domain_t** domain = &glob->domain; fclaw2d_domain_data_t *ddata = fclaw2d_domain_get_data (*domain); int i, j; for(i = 0; i < (*domain)->num_blocks; i++) { fclaw2d_block_t *block = (*domain)->blocks + i; for(j = 0; j < block->num_patches; j++) { /* This is here to delete any patches created during initialization, and not through regridding */ fclaw2d_patch_t *patch = block->patches + j; fclaw2d_patch_data_delete(glob,patch); } block->user = NULL; } if (ddata->domain_exchange != NULL) { fclaw2d_exchange_delete(glob); } /* Output memory discrepancy for the ClawPatch */ if (ddata->count_set_patch != ddata->count_delete_patch) { printf ("[%d] This domain had Clawpatch set %d and deleted %d times\n", (*domain)->mpirank, 
ddata->count_set_patch, ddata->count_delete_patch); } fclaw2d_domain_data_delete(*domain); // Delete allocated pointers to set of functions. fclaw2d_domain_destroy(*domain); *domain = NULL; } void fclaw2d_domain_iterate_level_mthread (fclaw2d_domain_t * domain, int level, fclaw2d_patch_callback_t pcb, void *user) { #if (_OPENMP) int i, j; fclaw2d_block_t *block; fclaw2d_patch_t *patch; for (i = 0; i < domain->num_blocks; i++) { block = domain->blocks + i; #pragma omp parallel for private(patch,j) for (j = 0; j < block->num_patches; j++) { patch = block->patches + j; if (patch->level == level) { pcb (domain, patch, i, j, user); } } } #else fclaw_global_essentialf("fclaw2d_patch_iterator_mthread : We should not be here\n"); #endif }
zgeadd.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_geadd
 *
 * Performs an addition of two general rectangular matrices similarly to the
 * pzgeadd() function from the PBLAS library:
 *
 *    \f[ B = \alpha * op( A ) + \beta * B, \f]
 *
 * where op( X ) is one of:
 *    \f[ op( X ) = X,   \f]
 *    \f[ op( X ) = X^T, \f]
 *    \f[ op( X ) = X^H, \f]
 *
 * alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
 * n-by-m matrix depending on the value of transa and B an m-by-n matrix.
 *
 *******************************************************************************
 *
 * @param[in] transa
 *          Specifies whether the matrix A is non-transposed, transposed, or
 *          conjugate transposed
 *          - PlasmaNoTrans:   op( A ) = A
 *          - PlasmaTrans:     op( A ) = A^T
 *          - PlasmaConjTrans: op( A ) = A^H
 *
 * @param[in] m
 *          Number of rows of the matrices op( A ) and B.
 *          m >= 0.
 *
 * @param[in] n
 *          Number of columns of the matrices op( A ) and B.
 *          n >= 0.
 *
 * @param[in] alpha
 *          Scalar factor of A.
 *
 * @param[in] pA
 *          Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
 *          and m otherwise.
 *
 * @param[in] lda
 *          Leading dimension of the array A. lda >= max(1,l), where l is m
 *          when transa = PlasmaNoTrans and n otherwise.
 *
 * @param[in] beta
 *          Scalar factor of B.
 *
 * @param[in,out] pB
 *          Matrix of size ldb-by-n.
 *          On exit, B = alpha * op( A ) + beta * B
 *
 * @param[in] ldb
 *          Leading dimension of the array B.
 *          ldb >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zgeadd
 * @sa plasma_cgeadd
 * @sa plasma_dgeadd
 * @sa plasma_sgeadd
 *
 ******************************************************************************/
int plasma_zgeadd(plasma_enum_t transa,
                  int m, int n,
                  plasma_complex64_t alpha, plasma_complex64_t *pA, int lda,
                  plasma_complex64_t beta,  plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (pA == NULL) {
        plasma_error("NULL A");
        return -5;
    }

    // Dimensions of A depend on whether it is transposed on input.
    int am, an;
    if (transa == PlasmaNoTrans) {
        am = m;
        an = n;
    }
    else {
        am = n;
        an = m;
    }
    int bm = m;
    int bn = n;

    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -6;
    }
    if (pB == NULL) {
        plasma_error("NULL B");
        return -8;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -9;
    }

    // quick return
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geadd(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.  The return value was previously ignored; a failed
    // initialization must release the descriptors created above.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);

        // Call tile async function.
        plasma_omp_zgeadd(transa, alpha, A, beta, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_geadd
 *
 * Performs an addition of two general rectangular matrices similarly to the
 * pzgeadd() function from the PBLAS library. Non-blocking tile version of
 * plasma_zgeadd(). May return before the computation is finished. Operates on
 * matrices stored by tiles. All matrices are passed through descriptors. All
 * dimensions are taken from the descriptors. Allows for pipelining of
 * operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] transa
 *          Specifies whether the matrix A is non-transposed, transposed, or
 *          conjugate transposed
 *          - PlasmaNoTrans:   op( A ) = A
 *          - PlasmaTrans:     op( A ) = A^T
 *          - PlasmaConjTrans: op( A ) = A^H
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] B
 *          Descriptor of matrix B.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check the
 *          sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zgeadd
 * @sa plasma_omp_cgeadd
 * @sa plasma_omp_dgeadd
 * @sa plasma_omp_sgeadd
 *
 ******************************************************************************/
void plasma_omp_zgeadd(plasma_enum_t transa,
                       plasma_complex64_t alpha, plasma_desc_t A,
                       plasma_complex64_t beta,  plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Validate the sequence and request handles before anything else:
    // the failure-reporting path below dereferences them, so the original
    // ordering (checking them last) could dereference NULL.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    int am = transa == PlasmaNoTrans ? A.m : A.n;
    if ((alpha == 0.0 || am == 0) && beta == 1.0)
        return;

    // Call the parallel function.
    plasma_pzgeadd(transa, alpha, A, beta, B, sequence, request);
}
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) { for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(24*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(24*t3+Nx+20,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),128*t4+126),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ 
(-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
monolithic_pass.c
#include <stdio.h> #include <stdbool.h> #include <omp.h> int mono_main() { setvbuf(stdout, NULL, _IONBF, 0); // struct Point { // int x; // int y; // }; // // struct Point p1 = {0, 0}; // struct Point p2 = p1; // printf("%d %d\n", p1.x, p2.x); // p1.x = 29; // printf("%d %d\n", p1.x, p2.x); double totalScore = 0; double finalScore = 0; _Bool pass = false; #pragma omp parallel { printf("thread number %d\n", omp_get_thread_num()); printf("hello\n"); } for (;;) { printf("Please input the total score\n"); if (scanf("%lf", &totalScore) != 1) { fprintf(stderr, "Premature end of file before int was input, exiting"); // exit(-1); } if (totalScore < 0) { break; } printf("Please input the final score\n"); if (scanf("%lf", &finalScore) != 1) { fprintf(stderr, "Premature end of file before int was input, exiting"); // exit(-1); } pass = (totalScore >= 60) || ((finalScore >= 80) && (totalScore >= 40)); printf("isPass:%s\n", pass ? "true" : "false"); } return 0; } //int main() { // mono_main(); //}
critical-2.c
int i; void foo (int j) { #pragma omp critical (foo) hint (j + 1) /* { dg-error "constant integer expression" } */ i = i + 1; #pragma omp critical (foo),hint(j) /* { dg-error "constant integer expression" } */ i = i + 1; }
CorrCoef.c
#include "Python.h"
#include "numpy/arrayobject.h"

#include <fcntl.h>
#include <math.h>
#include <omp.h>

#define VERSION "0.3"

/* Compute all n*(n-1)/2 pairwise Pearson correlation coefficients of the
 * n series of length l stored row-major in d, returned as a 1-D array
 * holding the upper triangle (row-wise, excluding the diagonal).
 * Returns NULL with a Python exception set on allocation failure. */
PyArrayObject *
pearson(const double *d, const unsigned long n, const unsigned long l)
{
	PyArrayObject *coef;
	double *c;
	npy_intp dim[1];
	unsigned long ik, i, k, o, nn;
	double mk, sk, dk, h;
	double mi, si, sum;
	double *m, *s;

	nn = n * (n - 1) / 2;
	dim[0] = (npy_intp) nn;
	coef = (PyArrayObject *) PyArray_ZEROS(1, dim, PyArray_DOUBLE, 0);
	if(!coef) {
		PyErr_SetString(PyExc_MemoryError, "Cannot create output array.");
		return NULL;
	}

	/* mean and std (Welford's online algorithm, one pass per series) */
	m = malloc(n * sizeof(double));
	s = malloc(n * sizeof(double));
	if(!m || !s) {
		/* free whatever was allocated before failing (the original leaked
		   coef and the successful one of m/s here) */
		free(m);
		free(s);
		Py_DECREF(coef);
		PyErr_SetString(PyExc_MemoryError, "Cannot create mean and std arrays.");
		return NULL;
	}
	#pragma omp parallel for private(i, k, h, mk, sk, dk)
	for(i = 0; i < n; i++) {
		mk = sk = 0;
		for(k = 0; k < l; k++) {
			dk = d[i*l + k];
			h = dk - mk;
			mk += h / (k + 1);
			sk += h * (dk - mk);
		}
		m[i] = mk;
		s[i] = sqrt(sk / (l - 1));  /* sample std (ddof = 1) */
	}

	/* dot products: one flattened iteration per (i,k) pair.
	 * `sum` is now in the private clause -- it was shared in the original,
	 * and the nested `parallel for reduction(+:sum)` on the inner loop made
	 * every outer thread race on the same accumulator.  The inner loop is
	 * serial per pair; the pairs themselves run in parallel. */
	c = (double *) coef->data;
	#pragma omp parallel for private(ik, i, k, mi, si, mk, sk, sum, o)
	for(ik = 0; ik < nn; ik++) {
		/* map the flat index to triangle coordinates (i < k) */
		i = ik / n;
		k = ik % n;
		if(k <= i) {
			i = n - i - 2;
			k = n - k - 1;
		}
		mi = m[i];
		mk = m[k];
		si = s[i];
		sk = s[k];
		sum = 0;
		for(o = 0; o < l; o++)
			sum += (d[i*l + o] - mi) * (d[k*l + o] - mk) / si / sk;
		c[nn-(n-i)*((n-i)-1)/2+k-i-1] = sum / (l - 1);
	}
	free(m);
	free(s);
	return coef;
}

/* Python entry point: Pearson(data[, num_threads]) -> 1-D ndarray. */
static PyObject *
CorrCoef_Pearson(PyObject *self, PyObject* args)
{
	PyObject *arg;
	PyArrayObject *data, *coef;
	int nthreads;

	nthreads = 0;
	if(!PyArg_ParseTuple(args, "O|I", &arg, &nthreads))
		return NULL;
	data = (PyArrayObject *) PyArray_ContiguousFromObject(arg,
			PyArray_DOUBLE, 2, 2);
	if(!data)
		return NULL;
	if(nthreads)
		omp_set_num_threads(nthreads);
	coef = pearson((double *)data->data,
			data->dimensions[0], data->dimensions[1]);
	Py_DECREF(data);
	return PyArray_Return(coef);
}

static PyMethodDef
CorrCoef_Methods[] = {
	{"Pearson", CorrCoef_Pearson, METH_VARARGS,
	 "triu_corr = Pearson(data, num_threads)\n\nReturn Pearson product-moment correlation coefficients.\n\nParameters\n----------\ndata : array_like\nA 2-D array containing multiple variables and observations. Each row of `data` represents a variable, and each column a single observation of all those variables.\n\nnum_threads : int, optional\nThe maximum number of OpenMP threads used.\n\nReturns\n-------\ntriu_corr : ndarray\nThe upper triangle of the correlation coefficient matrix of the variables.\n"},
	{NULL, NULL, 0, NULL}
};

static struct PyModuleDef
ModDef = {
	PyModuleDef_HEAD_INIT,
	"CorrCoef",
	NULL,
	-1,
	CorrCoef_Methods
};

PyMODINIT_FUNC
PyInit_CorrCoef(void)
{
	PyObject *mod;

	mod = PyModule_Create(&ModDef);
	PyModule_AddStringConstant(mod, "__author__",
		"Aljoscha Rheinwalt <aljoscha.rheinwalt@uni-potsdam.de>");
	PyModule_AddStringConstant(mod, "__version__", VERSION);
	import_array();
	return mod;
}

int
main(int argc, char **argv)
{
	/* Py_SetProgramName requires the buffer to remain valid for the life
	 * of the interpreter, hence static storage (the original used a stack
	 * buffer).  The conversion is also bounded now to avoid overflow. */
	static wchar_t pname[255];

	PyImport_AppendInittab("CorrCoef", PyInit_CorrCoef);
	mbstowcs(pname, argv[0], sizeof(pname)/sizeof(pname[0]) - 1);
	pname[sizeof(pname)/sizeof(pname[0]) - 1] = L'\0';
	Py_SetProgramName(pname);
	Py_Initialize();
	PyImport_ImportModule("CorrCoef");
	/* The original called PyMem_RawFree(argv[0]) here, freeing OS-owned
	 * memory never allocated by Python -- undefined behavior; removed. */
	return 0;
}
GB_unaryop__lnot_int64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int64_uint8
// op(A') function:  GB_tran__lnot_int64_uint8

// C type:   int64_t
// A type:   uint8_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int64_uint8
(
    int64_t *Cx,       // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numeric phase only; the transpose loop body comes from the template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
renderer.h
#pragma once #include <stb_image_write.h> #include "materials/radiometry.h" #include "scene/camera.h" #include "config.h" #include "debug.h" namespace pbr { class Image { public: Image(unsigned int rows, unsigned int cols) : _rows(rows), _cols(cols), _data() { _data.resize(rows * cols); } // Write to file as RGBA void write(const std::string& name) { stbi_flip_vertically_on_write(true); stbi_write_png(name.c_str(), _cols, _rows, 4, _data.data(), _cols * sizeof (Colori)); } int rows() { return _rows; } int cols() { return _cols; } Colori& operator[](size_t index) { return _data.at(index); } const Colori& operator[](size_t index) const { return _data.at(index); } private: std::vector<Colori> _data; const unsigned int _rows; const unsigned int _cols; }; template <class Integrator> class Renderer { public: void render(const Scene* scene, const Camera& camera, Image& outImage) { integrator.set_scene(scene); UniformRNG rng; // Iterate over all rows #if PBR_USE_THREADS #pragma omp parallel for private(rng) #endif for (int row = 0; row < outImage.rows(); ++row) { LOG_DEBUG("Row %d", row); // Iterate over all cols for (int col = 0; col < outImage.cols(); ++col) { Colorf color; for (int i = 0; i < PBR_SAMPLES_PER_PIXEL; ++i) { auto sample = rng.sample_disk(); #if PBR_STRATIFIED_SAMPLE // Split the pixel into four quadrants for stratified sampling // Modulo operations to choose these quadrants double center_x = (1. / 2.) * ((i % 2) * 2 - 1); double center_y = (1. / 2.) * (((i % 4) < 2) ? 1 : -1); double deviation_x = sample.x / 2.; double deviation_y = sample.y / 2.; #else double center_x = 0; double center_y = 0; double deviation_x = sample.x; double deviation_y = sample.y; #endif // Normalize (row + deviation, col + deviation) to (x, y) where x and y are between -1 and 1. 
double x = ((col + center_x + deviation_x) / PBR_OUTPUT_IMAGE_COLUMNS) * 2 - 1; double y = ((row + center_y + deviation_y) / PBR_OUTPUT_IMAGE_ROWS) * 2 - 1; Ray ray = camera.get_ray(x, y); color = color + integrator.trace_ray(ray, 0) / (PBR_SAMPLES_PER_PIXEL); } outImage[row * PBR_OUTPUT_IMAGE_COLUMNS + col] = to_colori(color); } } } private: Integrator integrator {}; }; }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file utils.h
 * \brief Basic utility functions.
 */
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_

#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>

#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>

#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif

namespace mxnet {
namespace common {

/*!
 * \brief IndPtr should be non-negative, in non-decreasing order, start with 0
 *        and end with value equal with size of indices.
 *
 * Kernel mapped over indptr positions; writes kCSRIndPtrErr to *out on the
 * first violated invariant (it only ever overwrites, never clears, so a
 * pre-set error value is preserved).
 */
struct csr_indptr_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
        (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};

/*!
 * \brief Indices should be non-negative, less than the number of columns
 *        and in ascending order per row.
 *
 * Kernel mapped over rows; scans the row's index slice and flags
 * kCSRIdxErr on the first out-of-range or non-ascending index.
 */
struct csr_idx_check {
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};

/*!
 * \brief Indices of RSPNDArray should be non-negative,
 *        less than the size of first dimension and in ascending order
 *
 * `end` is the last valid position, so idx[i+1] is only read when i < end.
 */
struct rsp_idx_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};

// Forward declaration; device-specific definitions live elsewhere.
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check);

/*!
 * \brief Check the validity of CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *                   otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const TShape shape = input.shape();
  const TShape idx_shape = input.aux_shape(csr::kIdx);
  const TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const TShape storage_shape = input.storage_shape();
  // Cheap O(1) shape consistency check; reports kCSRShapeErr and returns early.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
     MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
       DType* err = err_cpu.dptr<DType>();
       *err = kCSRShapeErr;
     });
     return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          // 1-element scratch array on the target device to hold the error code.
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          // Copy the device-side error code back to the caller's CPU blob.
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}

/*!
 * \brief Check the validity of RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of RowSparseStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *                   otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // Row-index count must match the number of stored rows.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // An empty row-sparse array is trivially valid.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());

        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}

// Dispatches the format check by storage type; default storage needs no check.
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  int stype = input.storage_type();
  if (stype == kCSRStorage) {
    CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kRowSparseStorage) {
    CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kDefaultStorage) {
    // no-op for default storage
  } else {
    LOG(FATAL) << "Unknown storage type " << stype;
  }
}

/*! \brief Pick rows specified by user input index array from a row sparse ndarray
 *         and save them in the output sparse ndarray.
 */
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
                                     const NDArray& input_nd,
                                     const TBlob& idx_data,
                                     const OpReqType req,
                                     NDArray* output_nd);

/* \brief Casts tensor storage type to the new type. */
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);

/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
 *         false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype) {
  if (!vstorage.empty()) {
    for (const auto& i : vstorage) {
      if (i != stype) return false;
    }
    return true;
  }
  return false;
}

/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 *         or `stype2'. Sets boolean if both found.
 *         false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (!vstorage.empty()) {
    // Bitmask: bit 0 set if stype1 was seen, bit 1 set if stype2 was seen.
    uint8_t has = 0;
    for (const auto i : vstorage) {
      if (i == stype1) {
        has |= 1;
      } else if (i == stype2) {
        has |= 2;
      } else {
        return false;
      }
    }
    if (has_both) {
      *has_both = has == 3;
    }
    return true;
  }
  return false;
}

/*! \brief returns true if the storage types of arrays in `ndarrays`
 *         are the same as target `stype`. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (!ndarrays.empty()) {
    for (const auto& nd : ndarrays) {
      if (nd.storage_type() != stype) {
        return false;
      }
    }
    return true;
  }
  return false;
}

/*! \brief returns true if the storage types of arrays in `ndarrays`
 *         are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (!ndarrays.empty()) {
    // Same bitmask scheme as the StorageTypeVector overload above.
    uint8_t has = 0;
    for (const auto& nd : ndarrays) {
      const NDArrayStorageType stype = nd.storage_type();
      if (stype == stype1) {
        has |= 1;
      } else if (stype == stype2) {
        has |= 2;
      } else {
        return false;
      }
    }
    if (has_both) {
      *has_both = has == 3;
    }
    return true;
  }
  return false;
}

/*! \brief returns true if storage type of any array in `ndarrays`
 *         is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (!ndarrays.empty()) {
    for (const auto& nd : ndarrays) {
      if (nd.storage_type() == stype) {
        return true;
      }
    }
  }
  return false;
}

/*! \brief returns true if any storage type `ndstype` in `ndstypes`
 *         is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  if (!ndstypes.empty()) {
    for (const auto& ndstype : ndstypes) {
      if (ndstype == stype) {
        return true;
      }
    }
  }
  return false;
}

/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  switch (x) {
    case DispatchMode::kFCompute:
      return "fcompute";
    case DispatchMode::kFComputeEx:
      return "fcompute_ex";
    case DispatchMode::kFComputeFallback:
      return "fcompute_fallback";
    case DispatchMode::kVariable:
      return "variable";
    case DispatchMode::kUndefined:
      return "undefined";
  }
  return "unknown";
}

/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:
      return "default";
    case kCSRStorage:
      return "csr";
    case kRowSparseStorage:
      return "row_sparse";
  }
  return "unknown";
}

/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  switch (dev_type) {
    case Context::kCPU:
      return "cpu";
    case Context::kGPU:
      return "gpu";
    case Context::kCPUPinned:
      return "cpu_pinned";
    case Context::kCPUShared:
      return "cpu_shared";
  }
  return "unknown";
}

/*! \brief get string representation of the operator stypes */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  os << "operator = " << attrs.op->name
     << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "params = {";
  for (auto kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}

/*! \brief get string representation of the operator */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::string result = "";
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  // Collect storage types of all inputs/outputs and delegate the formatting.
  auto xform = [](const NDArray arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
  return result;
}

/*! \brief log message once. Intended for storage fallback warning messages.
 */
inline void LogOnce(const std::string& message) {
  // Thread-local dedup set: each distinct message is logged at most once
  // per thread.
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  if (log_store->find(message) == log_store->end()) {
    LOG(INFO) << message;
    log_store->insert(message);
  }
}

/*! \brief log storage fallback event
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
                                       "Should only be set if "
                                       "your model has variable input shapes, "
                                       "as cache size may grow unbounded");
#endif
}

// heuristic to determine number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is resource efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}

// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is resource efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(num_match_color, GetNumThreadsPerGPU());
}

// OpenMP reduction over a raw array; falls back to a serial loop when
// OpenMP is unavailable.
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V sum = start;
#pragma omp parallel for reduction(+:sum)
  for (int i = 0; i < n; ++i) {
    sum += a[i];
  }
  return sum;
}

/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first+len, comp);
  } else {
    // Fork a thread for the left half, recurse on the right, then merge.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // Lower-bounded grain size keeps thread-spawn overhead in check.
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}

/*!
 * \brief Random Engine
 */
typedef std::mt19937 RANDOM_ENGINE;

/*!
 * \brief Helper functions.
 */
namespace helper {

/*!
 * \brief Helper for non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};

/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};

}  // namespace helper

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject
MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound
MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // The trailing {} value-initializes every array element.
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructs an arrays of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound
MakeUnique(Args&&... args) = delete;

// Looks up the registered compute function for `op` by device; returns the
// registry default (nullptr fallback) when the op has no such attribute.
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");

  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask";
    return nullptr;
  }
}

/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  // For floating-point T this is 2^digits (digits = mantissa bits).
  return std::is_integral<T>::value ?
    std::numeric_limits<T>::max():
    size_t(2) << (std::numeric_limits<T>::digits - 1);
}

// half_t has a 10-bit mantissa, so integers up to 2^11 are exact.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}

// Bit width of `a` (position of the highest set bit, 1-based); returns 1 for 0.
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

// Same as ilog2ul but for unsigned int.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

/*!
 * \brief Return an NDArray of all zeros.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const TShape &shape,
                         const Context &ctx, const int dtype) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}

/*!
 * \brief Helper to add a NDArray of zeros to a std::vector.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype, const TShape &shape,
                             const Context &ctx, const int dtype,
                             std::vector<NDArray> *vec) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    vec->emplace_back(shape, ctx, false, dtype);
    vec->back() = 0;
  } else {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
  }
}


/*!
 * \brief parallelize copy by OpenMP.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Threads only pay off for very large copies; below the threshold a plain
  // memcpy wins.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_COPY_SIZE", 2000000000);
  if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
    std::memcpy(dst, src, sizeof(DType) * size);
  }
}

}  // namespace common
}  // namespace mxnet
#endif  // MXNET_COMMON_UTILS_H_
sum-mp-noshare.c
#include <omp.h>

#define N 100000000
#define NTHREADS 8

/* Global input array; zero-initialized because it has static storage. */
int values[N];

int main(int argc, char *argv[]) {
  /* One result slot per thread, written exactly once per thread. */
  static int sum[NTHREADS];
  int tid;

  omp_set_num_threads(NTHREADS);

  /*
   * Each thread accumulates into a stack-local variable and publishes its
   * total to the shared array only once at the end, so the shared cache
   * lines are not written inside the hot loop.
   */
#pragma omp parallel private(tid)
  {
    tid = omp_get_thread_num();

    int acc = 0;
    int i;
    for (i = 0; i < N; i++) {
      acc += values[i] >> tid;
    }

    sum[tid] = acc;
  }
}
real_to_reciprocal.c
/* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */

/* This file is part of phonopy. */

/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */

/* * Redistributions of source code must retain the above copyright */
/*   notice, this list of conditions and the following disclaimer. */

/* * Redistributions in binary form must reproduce the above copyright */
/*   notice, this list of conditions and the following disclaimer in */
/*   the documentation and/or other materials provided with the */
/*   distribution. */

/* * Neither the name of the phonopy project nor the names of its */
/*   contributors may be used to endorse or promote products derived */
/*   from this software without specific prior written permission. */

/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <phonoc_array.h>
#include <phonoc_const.h>
#include <phonoc_utils.h>
#include <phonon3_h/real_to_reciprocal.h>
#include <lapack_wrapper.h>

static void real_to_reciprocal_single_thread(lapack_complex_double *fc3_reciprocal,
                                             const double q[9],
                                             const double *fc3,
                                             const int is_compact_fc3,
                                             const double *shortest_vectors,
                                             const int svecs_dims[3],
                                             const int *multiplicity,
                                             const int *p2s_map,
                                             const int *s2p_map);
static void real_to_reciprocal_openmp(lapack_complex_double *fc3_reciprocal,
                                      const double q[9],
                                      const double *fc3,
                                      const int is_compact_fc3,
                                      const double *shortest_vectors,
                                      const int svecs_dims[3],
                                      const int *multiplicity,
                                      const int *p2s_map,
                                      const int *s2p_map);
static void real_to_reciprocal_elements(lapack_complex_double *fc3_rec_elem,
                                        const double q[9],
                                        const double *fc3,
                                        const int is_compact_fc3,
                                        const double *shortest_vectors,
                                        const int svecs_dims[3],
                                        const int *multiplicity,
                                        const int *p2s,
                                        const int *s2p,
                                        const int pi0,
                                        const int pi1,
                                        const int pi2);
static lapack_complex_double get_phase_factor(const double q[],
                                              const int qi,
                                              const double *shortest_vectors,
                                              const int multi);
static lapack_complex_double get_pre_phase_factor(const int i,
                                                  const double q[9],
                                                  const double *shortest_vectors,
                                                  const int svecs_dims[3],
                                                  const int *multiplicity,
                                                  const int *p2s_map);

/* fc3_reciprocal[num_patom, num_patom, num_patom, 3, 3, 3] */
/* Fourier-transform the real-space third-order force constants fc3 onto the
 * triplet of q-points packed in q[9] (three 3-vectors).  Dispatches between
 * the serial and the OpenMP implementation below; both produce the same
 * result. */
void r2r_real_to_reciprocal(lapack_complex_double *fc3_reciprocal,
                            const double q[9],
                            const double *fc3,
                            const int is_compact_fc3,
                            const double *shortest_vectors,
                            const int svecs_dims[3],
                            const int *multiplicity,
                            const int *p2s_map,
                            const int *s2p_map,
                            const int openmp_at_bands)
{
  if (openmp_at_bands) {
    real_to_reciprocal_openmp(fc3_reciprocal,
                              q,
                              fc3,
                              is_compact_fc3,
                              shortest_vectors,
                              svecs_dims,
                              multiplicity,
                              p2s_map,
                              s2p_map);
  } else {
    real_to_reciprocal_single_thread(fc3_reciprocal,
                                     q,
                                     fc3,
                                     is_compact_fc3,
                                     shortest_vectors,
                                     svecs_dims,
                                     multiplicity,
                                     p2s_map,
                                     s2p_map);
  }
}

/* Serial implementation: for each triplet (i, j, k) of primitive atoms,
 * accumulate the transformed 3x3x3 block, then multiply the whole i-slab by
 * the pre-phase factor of atom i. */
static void real_to_reciprocal_single_thread(lapack_complex_double *fc3_reciprocal,
                                             const double q[9],
                                             const double *fc3,
                                             const int is_compact_fc3,
                                             const double *shortest_vectors,
                                             const int svecs_dims[3],
                                             const int *multiplicity,
                                             const int *p2s_map,
                                             const int *s2p_map)
{
  int i, j, k, num_patom, adrs_shift;
  lapack_complex_double pre_phase_factor;

  num_patom = svecs_dims[1];

  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < num_patom; j++) {
      for (k = 0; k < num_patom; k++) {
        real_to_reciprocal_elements(fc3_reciprocal +
                                    i * 27 * num_patom * num_patom +
                                    j * 27 * num_patom +
                                    k * 27,
                                    q,
                                    fc3,
                                    is_compact_fc3,
                                    shortest_vectors,
                                    svecs_dims,
                                    multiplicity,
                                    p2s_map,
                                    s2p_map,
                                    i, j, k);
      }
    }
    pre_phase_factor = get_pre_phase_factor(
      i, q, shortest_vectors, svecs_dims, multiplicity, p2s_map);
    /* Scale every element belonging to primitive atom i by its pre-phase. */
    adrs_shift = i * num_patom * num_patom * 27;
    for (j = 0; j < num_patom * num_patom * 27; j++) {
      fc3_reciprocal[adrs_shift + j] =
        phonoc_complex_prod(fc3_reciprocal[adrs_shift + j], pre_phase_factor);
    }
  }
}

/* OpenMP implementation: the (j, k) pair loop is flattened into a single
 * index jk so one parallel-for covers both inner loops. Writes are disjoint
 * per jk, so no synchronization is needed. */
static void real_to_reciprocal_openmp(lapack_complex_double *fc3_reciprocal,
                                      const double q[9],
                                      const double *fc3,
                                      const int is_compact_fc3,
                                      const double *shortest_vectors,
                                      const int svecs_dims[3],
                                      const int *multiplicity,
                                      const int *p2s_map,
                                      const int *s2p_map)
{
  int i, j, k, jk, num_patom, adrs_shift;
  lapack_complex_double pre_phase_factor;

  num_patom = svecs_dims[1];

  for (i = 0; i < num_patom; i++) {
#pragma omp parallel for private(j, k)
    for (jk = 0; jk < num_patom * num_patom; jk++) {
      j = jk / num_patom;
      k = jk % num_patom;
      real_to_reciprocal_elements(fc3_reciprocal +
                                  i * 27 * num_patom * num_patom +
                                  j * 27 * num_patom +
                                  k * 27,
                                  q,
                                  fc3,
                                  is_compact_fc3,
                                  shortest_vectors,
                                  svecs_dims,
                                  multiplicity,
                                  p2s_map,
                                  s2p_map,
                                  i, j, k);
    }
    pre_phase_factor = get_pre_phase_factor(
      i, q, shortest_vectors, svecs_dims, multiplicity, p2s_map);
    adrs_shift = i * num_patom * num_patom * 27;
#pragma omp parallel for
    for (j = 0; j < num_patom * num_patom * 27; j++) {
      fc3_reciprocal[adrs_shift + j] =
        phonoc_complex_prod(fc3_reciprocal[adrs_shift + j], pre_phase_factor);
    }
  }
}

/* Accumulate one 27-element (3x3x3) reciprocal block for the primitive-atom
 * triplet (pi0, pi1, pi2): sum over all supercell images of pi1 and pi2,
 * each weighted by its phase factor. Real and imaginary parts are summed
 * separately in double before being packed into complex at the end. */
static void real_to_reciprocal_elements(lapack_complex_double *fc3_rec_elem,
                                        const double q[9],
                                        const double *fc3,
                                        const int is_compact_fc3,
                                        const double *shortest_vectors,
                                        const int svecs_dims[3],
                                        const int *multiplicity,
                                        const int *p2s,
                                        const int *s2p,
                                        const int pi0,
                                        const int pi1,
                                        const int pi2)
{
  int i, j, k, l, num_satom, adrs_shift, adrs_vec1, adrs_vec2;
  lapack_complex_double phase_factor, phase_factor1, phase_factor2;
  double fc3_rec_real[27], fc3_rec_imag[27];

  for (i = 0; i < 27; i++) {
    fc3_rec_real[i] = 0;
    fc3_rec_imag[i] = 0;
  }

  num_satom = svecs_dims[0];

  /* In compact fc3 the first axis is already a primitive-atom index;
   * otherwise it is a supercell-atom index. */
  if (is_compact_fc3) {
    i = pi0;
  } else {
    i = p2s[pi0];
  }

  for (j = 0; j < num_satom; j++) {
    if (s2p[j] != p2s[pi1]) {
      continue;  /* j is not an image of primitive atom pi1 */
    }

    adrs_vec1 = j * svecs_dims[1] + pi0;
    phase_factor1 = get_phase_factor(q,
                                     1,
                                     shortest_vectors +
                                     adrs_vec1 * svecs_dims[2] * 3,
                                     multiplicity[adrs_vec1]);
    for (k = 0; k < num_satom; k++) {
      if (s2p[k] != p2s[pi2]) {
        continue;  /* k is not an image of primitive atom pi2 */
      }
      adrs_vec2 = k * svecs_dims[1] + pi0;
      phase_factor2 = get_phase_factor(q,
                                       2,
                                       shortest_vectors +
                                       adrs_vec2 * svecs_dims[2] * 3,
                                       multiplicity[adrs_vec2]);
      adrs_shift = i * 27 * num_satom * num_satom + j * 27 * num_satom + k * 27;
      phase_factor = phonoc_complex_prod(phase_factor1, phase_factor2);
      for (l = 0; l < 27; l++) {
        fc3_rec_real[l] +=
          lapack_complex_double_real(phase_factor) * fc3[adrs_shift + l];
        fc3_rec_imag[l] +=
          lapack_complex_double_imag(phase_factor) * fc3[adrs_shift + l];
      }
    }
  }

  for (i = 0; i < 27; i++) {
    fc3_rec_elem[i] =
      lapack_make_complex_double(fc3_rec_real[i], fc3_rec_imag[i]);
  }
}

/* Phase factor exp(2*pi*i * (q1+q2+q3) . r_i) attached to primitive atom i.
 * The assert enforces that the corresponding shortest vector is unique. */
static lapack_complex_double get_pre_phase_factor(const int i,
                                                  const double q[9],
                                                  const double *shortest_vectors,
                                                  const int svecs_dims[3],
                                                  const int *multiplicity,
                                                  const int *p2s_map)
{
  int j;
  double pre_phase;
  lapack_complex_double pre_phase_factor;

  pre_phase = 0;
  for (j = 0; j < 3; j++) {
    pre_phase += shortest_vectors[p2s_map[i] * svecs_dims[1] *
                                  svecs_dims[2] * 3 + j] *
      (q[j] + q[3 + j] + q[6 + j]);
  }
  assert(multiplicity[p2s_map[i] * svecs_dims[1]] == 1);
  pre_phase *= M_2PI;
  pre_phase_factor = lapack_make_complex_double(cos(pre_phase),
                                                sin(pre_phase));
  return pre_phase_factor;
}

/* Average of exp(2*pi*i * q_qi . r) over the `multi` equivalent shortest
 * vectors; qi selects which of the three packed q-vectors is used. */
static lapack_complex_double get_phase_factor(const double q[],
                                              const int qi,
                                              const double *shortest_vectors,
                                              const int multi)
{
  int i, j;
  double sum_real, sum_imag, phase;

  sum_real = 0;
  sum_imag = 0;
  for (i = 0; i < multi; i++) {
    phase = 0;
    for (j = 0; j < 3; j++) {
      phase += q[qi * 3 + j] * shortest_vectors[i * 3 + j];
    }
    sum_real += cos(M_2PI * phase);
    sum_imag += sin(M_2PI * phase);
  }
  sum_real /= multi;
  sum_imag /= multi;

  return lapack_make_complex_double(sum_real, sum_imag);
}
nanort.h
// // NanoRT, single header only modern ray tracing kernel. // /* The MIT License (MIT) Copyright (c) 2015 Light Transport Entertainment, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef __NANORT_H__ #define __NANORT_H__ #include <vector> #include <queue> #include <cmath> #include <limits> #include <cstdlib> #include <cstring> #include <string> namespace nanort { // Parallelized BVH build is not yet fully tested, // thus turn off if you face a problem when building BVH. #define NANORT_ENABLE_PARALLEL_BUILD (0) // Small vector class useful for multi-threaded environment. // // stack_container.h // // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //#include "base/basictypes.h" // This allocator can be used with STL containers to provide a stack buffer // from which to allocate memory and overflows onto the heap. 
This stack buffer // would be allocated on the stack and allows us to avoid heap operations in // some situations. // // STL likes to make copies of allocators, so the allocator itself can't hold // the data. Instead, we make the creator responsible for creating a // StackAllocator::Source which contains the data. Copying the allocator // merely copies the pointer to this shared source, so all allocators created // based on our allocator will share the same stack buffer. // // This stack buffer implementation is very simple. The first allocation that // fits in the stack buffer will use the stack buffer. Any subsequent // allocations will not use the stack buffer, even if there is unused room. // This makes it appropriate for array-like containers, but the caller should // be sure to reserve() in the container up to the stack buffer size. Otherwise // the container will allocate a small array which will "use up" the stack // buffer. template <typename T, size_t stack_capacity> class StackAllocator : public std::allocator<T> { public: typedef typename std::allocator<T>::pointer pointer; typedef typename std::allocator<T>::size_type size_type; // Backing store for the allocator. The container owner is responsible for // maintaining this for as long as any containers using this allocator are // live. struct Source { Source() : used_stack_buffer_(false) {} // Casts the buffer in its right type. T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); } const T *stack_buffer() const { return reinterpret_cast<const T *>(stack_buffer_); } // // IMPORTANT: Take care to ensure that stack_buffer_ is aligned // since it is used to mimic an array of T. // Be careful while declaring any unaligned types (like bool) // before stack_buffer_. // // The buffer itself. It is not of type T because we don't want the // constructors and destructors to be automatically called. Define a POD // buffer of the right size instead. 
char stack_buffer_[sizeof(T[stack_capacity])]; // Set when the stack buffer is used for an allocation. We do not track // how much of the buffer is used, only that somebody is using it. bool used_stack_buffer_; }; // Used by containers when they want to refer to an allocator of type U. template <typename U> struct rebind { typedef StackAllocator<U, stack_capacity> other; }; // For the straight up copy c-tor, we can share storage. StackAllocator(const StackAllocator<T, stack_capacity> &rhs) : source_(rhs.source_) {} // ISO C++ requires the following constructor to be defined, // and std::vector in VC++2008SP1 Release fails with an error // in the class _Container_base_aux_alloc_real (from <xutility>) // if the constructor does not exist. // For this constructor, we cannot share storage; there's // no guarantee that the Source buffer of Ts is large enough // for Us. // TODO: If we were fancy pants, perhaps we could share storage // iff sizeof(T) == sizeof(U). template <typename U, size_t other_capacity> StackAllocator(const StackAllocator<U, other_capacity> &other) : source_(NULL) {} explicit StackAllocator(Source *source) : source_(source) {} // Actually do the allocation. Use the stack buffer if nobody has used it yet // and the size requested fits. Otherwise, fall through to the standard // allocator. pointer allocate(size_type n, void *hint = 0) { if (source_ != NULL && !source_->used_stack_buffer_ && n <= stack_capacity) { source_->used_stack_buffer_ = true; return source_->stack_buffer(); } else { return std::allocator<T>::allocate(n, hint); } } // Free: when trying to free the stack buffer, just mark it as free. For // non-stack-buffer pointers, just fall though to the standard allocator. 
void deallocate(pointer p, size_type n) { if (source_ != NULL && p == source_->stack_buffer()) source_->used_stack_buffer_ = false; else std::allocator<T>::deallocate(p, n); } private: Source *source_; }; // A wrapper around STL containers that maintains a stack-sized buffer that the // initial capacity of the vector is based on. Growing the container beyond the // stack capacity will transparently overflow onto the heap. The container must // support reserve(). // // WATCH OUT: the ContainerType MUST use the proper StackAllocator for this // type. This object is really intended to be used only internally. You'll want // to use the wrappers below for different types. template <typename TContainerType, int stack_capacity> class StackContainer { public: typedef TContainerType ContainerType; typedef typename ContainerType::value_type ContainedType; typedef StackAllocator<ContainedType, stack_capacity> Allocator; // Allocator must be constructed before the container! StackContainer() : allocator_(&stack_data_), container_(allocator_) { // Make the container use the stack allocation by reserving our buffer size // before doing anything else. container_.reserve(stack_capacity); } // Getters for the actual container. // // Danger: any copies of this made using the copy constructor must have // shorter lifetimes than the source. The copy will share the same allocator // and therefore the same stack buffer as the original. Use std::copy to // copy into a "real" container for longer-lived objects. ContainerType &container() { return container_; } const ContainerType &container() const { return container_; } // Support operator-> to get to the container. 
This allows nicer syntax like: // StackContainer<...> foo; // std::sort(foo->begin(), foo->end()); ContainerType *operator->() { return &container_; } const ContainerType *operator->() const { return &container_; } #ifdef UNIT_TEST // Retrieves the stack source so that that unit tests can verify that the // buffer is being used properly. const typename Allocator::Source &stack_data() const { return stack_data_; } #endif protected: typename Allocator::Source stack_data_; Allocator allocator_; ContainerType container_; // DISALLOW_EVIL_CONSTRUCTORS(StackContainer); StackContainer(const StackContainer &); void operator=(const StackContainer &); }; // StackString template <size_t stack_capacity> class StackString : public StackContainer< std::basic_string<char, std::char_traits<char>, StackAllocator<char, stack_capacity> >, stack_capacity> { public: StackString() : StackContainer<std::basic_string<char, std::char_traits<char>, StackAllocator<char, stack_capacity> >, stack_capacity>() {} private: // DISALLOW_EVIL_CONSTRUCTORS(StackString); StackString(const StackString &); void operator=(const StackString &); }; // StackWString template <size_t stack_capacity> class StackWString : public StackContainer< std::basic_string<wchar_t, std::char_traits<wchar_t>, StackAllocator<wchar_t, stack_capacity> >, stack_capacity> { public: StackWString() : StackContainer< std::basic_string<wchar_t, std::char_traits<wchar_t>, StackAllocator<wchar_t, stack_capacity> >, stack_capacity>() {} private: // DISALLOW_EVIL_CONSTRUCTORS(StackWString); StackWString(const StackWString &); void operator=(const StackWString &); }; // StackVector // // Example: // StackVector<int, 16> foo; // foo->push_back(22); // we have overloaded operator-> // foo[0] = 10; // as well as operator[] template <typename T, size_t stack_capacity> class StackVector : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity> { public: StackVector() : StackContainer<std::vector<T, 
StackAllocator<T, stack_capacity> >, stack_capacity>() {} // We need to put this in STL containers sometimes, which requires a copy // constructor. We can't call the regular copy constructor because that will // take the stack buffer from the original. Here, we create an empty object // and make a stack buffer of its own. StackVector(const StackVector<T, stack_capacity> &other) : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity>() { this->container().assign(other->begin(), other->end()); } StackVector<T, stack_capacity> & operator=(const StackVector<T, stack_capacity> &other) { this->container().assign(other->begin(), other->end()); return *this; } // Vectors are commonly indexed, which isn't very convenient even with // operator-> (using "->at()" does exception stuff we don't want). T &operator[](size_t i) { return this->container().operator[](i); } const T &operator[](size_t i) const { return this->container().operator[](i); } }; namespace { struct float3 { float3() {} float3(float xx, float yy, float zz) { x = xx; y = yy; z = zz; } float3(const float *p) { x = p[0]; y = p[1]; z = p[2]; } float3 operator*(float f) const { return float3(x * f, y * f, z * f); } float3 operator-(const float3 &f2) const { return float3(x - f2.x, y - f2.y, z - f2.z); } float3 operator*(const float3 &f2) const { return float3(x * f2.x, y * f2.y, z * f2.z); } float3 operator+(const float3 &f2) const { return float3(x + f2.x, y + f2.y, z + f2.z); } float3 &operator+=(const float3 &f2) { x += f2.x; y += f2.y; z += f2.z; return (*this); } float3 operator/(const float3 &f2) const { return float3(x / f2.x, y / f2.y, z / f2.z); } float operator[](int i) const { return (&x)[i]; } float &operator[](int i) { return (&x)[i]; } float3 neg() { return float3(-x, -y, -z); } float length() { return sqrtf(x * x + y * y + z * z); } void normalize() { float len = length(); if (fabs(len) > 1.0e-6f) { float inv_len = 1.0 / len; x *= inv_len; y *= inv_len; z *= inv_len; } } 
float x, y, z; // float pad; // for alignment }; inline float3 operator*(float f, const float3 &v) { return float3(v.x * f, v.y * f, v.z * f); } inline float3 vcross(float3 a, float3 b) { float3 c; c[0] = a[1] * b[2] - a[2] * b[1]; c[1] = a[2] * b[0] - a[0] * b[2]; c[2] = a[0] * b[1] - a[1] * b[0]; return c; } inline float vdot(float3 a, float3 b) { return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]; } } // namespace typedef struct { float t; float u; float v; unsigned int faceID; } Intersection; typedef struct { float org[3]; // must set float dir[3]; // must set float invDir[3]; // filled internally int dirSign[3]; // filled internally } Ray; class BVHNode { public: BVHNode(){}; ~BVHNode(){}; float bmin[3]; float bmax[3]; int flag; // 1 = leaf node, 0 = branch node int axis; // leaf // data[0] = npoints // data[1] = index // // branch // data[0] = child[0] // data[1] = child[1] unsigned int data[2]; }; namespace { class IsectComparator { public: bool operator()(const Intersection &a, const Intersection &b) const { return a.t < b.t; } }; // Stores furthest intersection at top typedef std::priority_queue<Intersection, std::vector<Intersection>, IsectComparator> IsectVector; template <typename T> class Matrix { public: void Print(T m[4][4]) { for (int i = 0; i < 4; i++) { printf("m[%d] = %f, %f, %f, %f\n", i, m[i][0], m[i][1], m[i][2], m[i][3]); } } void Identity(T m[4][4]) { m[0][0] = 1.0; m[0][1] = 0.0; m[0][2] = 0.0; m[0][3] = 0.0; m[1][0] = 0.0; m[1][1] = 1.0; m[1][2] = 0.0; m[1][3] = 0.0; m[2][0] = 0.0; m[2][1] = 0.0; m[2][2] = 1.0; m[2][3] = 0.0; m[3][0] = 0.0; m[3][1] = 0.0; m[3][2] = 0.0; m[3][3] = 1.0; } void Inverse(T m[4][4]) { /* * codes from intel web * cramer's rule version */ int i, j; T tmp[12]; /* tmp array for pairs */ T tsrc[16]; /* array of transpose source matrix */ T det; /* determinant */ /* transpose matrix */ for (i = 0; i < 4; i++) { tsrc[i] = m[i][0]; tsrc[i + 4] = m[i][1]; tsrc[i + 8] = m[i][2]; tsrc[i + 12] = m[i][3]; } /* calculate pair for 
first 8 elements(cofactors) */ tmp[0] = tsrc[10] * tsrc[15]; tmp[1] = tsrc[11] * tsrc[14]; tmp[2] = tsrc[9] * tsrc[15]; tmp[3] = tsrc[11] * tsrc[13]; tmp[4] = tsrc[9] * tsrc[14]; tmp[5] = tsrc[10] * tsrc[13]; tmp[6] = tsrc[8] * tsrc[15]; tmp[7] = tsrc[11] * tsrc[12]; tmp[8] = tsrc[8] * tsrc[14]; tmp[9] = tsrc[10] * tsrc[12]; tmp[10] = tsrc[8] * tsrc[13]; tmp[11] = tsrc[9] * tsrc[12]; /* calculate first 8 elements(cofactors) */ m[0][0] = tmp[0] * tsrc[5] + tmp[3] * tsrc[6] + tmp[4] * tsrc[7]; m[0][0] -= tmp[1] * tsrc[5] + tmp[2] * tsrc[6] + tmp[5] * tsrc[7]; m[0][1] = tmp[1] * tsrc[4] + tmp[6] * tsrc[6] + tmp[9] * tsrc[7]; m[0][1] -= tmp[0] * tsrc[4] + tmp[7] * tsrc[6] + tmp[8] * tsrc[7]; m[0][2] = tmp[2] * tsrc[4] + tmp[7] * tsrc[5] + tmp[10] * tsrc[7]; m[0][2] -= tmp[3] * tsrc[4] + tmp[6] * tsrc[5] + tmp[11] * tsrc[7]; m[0][3] = tmp[5] * tsrc[4] + tmp[8] * tsrc[5] + tmp[11] * tsrc[6]; m[0][3] -= tmp[4] * tsrc[4] + tmp[9] * tsrc[5] + tmp[10] * tsrc[6]; m[1][0] = tmp[1] * tsrc[1] + tmp[2] * tsrc[2] + tmp[5] * tsrc[3]; m[1][0] -= tmp[0] * tsrc[1] + tmp[3] * tsrc[2] + tmp[4] * tsrc[3]; m[1][1] = tmp[0] * tsrc[0] + tmp[7] * tsrc[2] + tmp[8] * tsrc[3]; m[1][1] -= tmp[1] * tsrc[0] + tmp[6] * tsrc[2] + tmp[9] * tsrc[3]; m[1][2] = tmp[3] * tsrc[0] + tmp[6] * tsrc[1] + tmp[11] * tsrc[3]; m[1][2] -= tmp[2] * tsrc[0] + tmp[7] * tsrc[1] + tmp[10] * tsrc[3]; m[1][3] = tmp[4] * tsrc[0] + tmp[9] * tsrc[1] + tmp[10] * tsrc[2]; m[1][3] -= tmp[5] * tsrc[0] + tmp[8] * tsrc[1] + tmp[11] * tsrc[2]; /* calculate pairs for second 8 elements(cofactors) */ tmp[0] = tsrc[2] * tsrc[7]; tmp[1] = tsrc[3] * tsrc[6]; tmp[2] = tsrc[1] * tsrc[7]; tmp[3] = tsrc[3] * tsrc[5]; tmp[4] = tsrc[1] * tsrc[6]; tmp[5] = tsrc[2] * tsrc[5]; tmp[6] = tsrc[0] * tsrc[7]; tmp[7] = tsrc[3] * tsrc[4]; tmp[8] = tsrc[0] * tsrc[6]; tmp[9] = tsrc[2] * tsrc[4]; tmp[10] = tsrc[0] * tsrc[5]; tmp[11] = tsrc[1] * tsrc[4]; /* calculate second 8 elements(cofactors) */ m[2][0] = tmp[0] * tsrc[13] + tmp[3] * tsrc[14] + tmp[4] * 
tsrc[15]; m[2][0] -= tmp[1] * tsrc[13] + tmp[2] * tsrc[14] + tmp[5] * tsrc[15]; m[2][1] = tmp[1] * tsrc[12] + tmp[6] * tsrc[14] + tmp[9] * tsrc[15]; m[2][1] -= tmp[0] * tsrc[12] + tmp[7] * tsrc[14] + tmp[8] * tsrc[15]; m[2][2] = tmp[2] * tsrc[12] + tmp[7] * tsrc[13] + tmp[10] * tsrc[15]; m[2][2] -= tmp[3] * tsrc[12] + tmp[6] * tsrc[13] + tmp[11] * tsrc[15]; m[2][3] = tmp[5] * tsrc[12] + tmp[8] * tsrc[13] + tmp[11] * tsrc[14]; m[2][3] -= tmp[4] * tsrc[12] + tmp[9] * tsrc[13] + tmp[10] * tsrc[14]; m[3][0] = tmp[2] * tsrc[10] + tmp[5] * tsrc[11] + tmp[1] * tsrc[9]; m[3][0] -= tmp[4] * tsrc[11] + tmp[0] * tsrc[9] + tmp[3] * tsrc[10]; m[3][1] = tmp[8] * tsrc[11] + tmp[0] * tsrc[8] + tmp[7] * tsrc[10]; m[3][1] -= tmp[6] * tsrc[10] + tmp[9] * tsrc[11] + tmp[1] * tsrc[8]; m[3][2] = tmp[6] * tsrc[9] + tmp[11] * tsrc[11] + tmp[3] * tsrc[8]; m[3][2] -= tmp[10] * tsrc[11] + tmp[2] * tsrc[8] + tmp[7] * tsrc[9]; m[3][3] = tmp[10] * tsrc[10] + tmp[4] * tsrc[8] + tmp[9] * tsrc[9]; m[3][3] -= tmp[8] * tsrc[9] + tmp[11] * tsrc[0] + tmp[5] * tsrc[8]; /* calculate determinant */ det = tsrc[0] * m[0][0] + tsrc[1] * m[0][1] + tsrc[2] * m[0][2] + tsrc[3] * m[0][3]; /* calculate matrix inverse */ det = 1.0 / det; for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { m[j][i] *= det; } } } void Transpose(T m[4][4]) { T t[4][4]; // Transpose for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { t[j][i] = m[i][j]; } } // Copy for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { m[j][i] = t[j][i]; } } } void Mult(T dst[4][4], const T m0[4][4], const T m1[4][4]) { for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { dst[i][j] = 0; for (int k = 0; k < 4; ++k) { dst[i][j] += m0[k][j] * m1[i][k]; } } } } void MultV(T dst[3], const T m[4][4], const T v[3]) { T tmp[3]; tmp[0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0]; tmp[1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1]; tmp[2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2]; dst[0] = 
tmp[0]; dst[1] = tmp[1]; dst[2] = tmp[2]; } void MultV(float3 &dst, const T m[4][4], const float3 &v) { T tmp[3]; tmp[0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0]; tmp[1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1]; tmp[2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2]; dst[0] = tmp[0]; dst[1] = tmp[1]; dst[2] = tmp[2]; } }; } ///< BVH build option. struct BVHBuildOptions { float costTaabb; int minLeafPrimitives; int maxTreeDepth; int binSize; int shallowDepth; size_t minPrimitivesForParallelBuild; // Cache bounding box computation. // Requires more memory, but BVHbuild can be faster. bool cacheBBox; // Set default value: Taabb = 0.2 BVHBuildOptions() : costTaabb(0.2), minLeafPrimitives(4), maxTreeDepth(256), binSize(64), shallowDepth(3), minPrimitivesForParallelBuild(1024 * 128), cacheBBox(false) {} }; ///< BVH build statistics. class BVHBuildStatistics { public: int maxTreeDepth; int numLeafNodes; int numBranchNodes; float epsScale; double buildSecs; // Set default value: Taabb = 0.2 BVHBuildStatistics() : maxTreeDepth(0), numLeafNodes(0), numBranchNodes(0), epsScale(1.0f), buildSecs(0.0) {} }; ///< BVH trace option. class BVHTraceOptions { public: // Hit only for face IDs in indexRange. // This feature is good to mimic something like glDrawArrays() unsigned int faceIdsRange[2]; BVHTraceOptions() { faceIdsRange[0] = 0; faceIdsRange[1] = 0x7FFFFFFF; // Up to 2G face IDs. } }; class BBox { public: float bmin[3]; float bmax[3]; BBox() { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max(); } }; class BVHAccel { public: BVHAccel() : epsScale_(1.0f){}; ~BVHAccel(){}; ///< Build BVH for input mesh. bool Build(const float *vertices, const unsigned int *faces, const unsigned int numFaces, const BVHBuildOptions &options); ///< Get statistics of built BVH tree. 
Valid after Build() BVHBuildStatistics GetStatistics() const { return stats_; } ///< Dump built BVH to the file. bool Dump(const char *filename); /// Load BVH binary bool Load(const char *filename); ///< Traverse into BVH along ray and find closest hit point if found bool Traverse(Intersection &isect, const float *vertices, const unsigned int *faces, const Ray &ray, const BVHTraceOptions& options); ///< Multi-hit ray tracversal ///< Returns `maxIntersections` frontmost intersections bool MultiHitTraverse(StackVector<Intersection, 128> &isects, int maxIntersections, const float *vertices, const unsigned int *faces, Ray &ray); const std::vector<BVHNode> &GetNodes() const { return nodes_; } const std::vector<unsigned int> &GetIndices() const { return indices_; } void BoundingBox(float bmin[3], float bmax[3]) const { if (nodes_.empty()) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max(); } else { bmin[0] = nodes_[0].bmin[0]; bmin[1] = nodes_[0].bmin[1]; bmin[2] = nodes_[0].bmin[2]; bmax[0] = nodes_[0].bmax[0]; bmax[1] = nodes_[0].bmax[1]; bmax[2] = nodes_[0].bmax[2]; } } private: #if NANORT_ENABLE_PARALLEL_BUILD typedef struct { unsigned int leftIdx; unsigned int rightIdx; unsigned int offset; } ShallowNodeInfo; // Used only during BVH construction std::vector<ShallowNodeInfo> shallowNodeInfos_; ///< Builds shallow BVH tree recursively. unsigned int BuildShallowTree(std::vector<BVHNode> &outNodes, const float *vertices, const unsigned int *faces, unsigned int leftIdx, unsigned int rightIdx, int depth, int maxShallowDepth, float epsScale); #endif ///< Builds BVH tree recursively. size_t BuildTree(BVHBuildStatistics &outStat, std::vector<BVHNode> &outNodes, const float *vertices, const unsigned int *faces, unsigned int leftIdx, unsigned int rightIdx, int depth, float epsScale); BVHBuildOptions options_; std::vector<BVHNode> nodes_; std::vector<unsigned int> indices_; // max 4G triangles. 
  BVHBuildStatistics stats_;
  float epsScale_;
  std::vector<BBox> bboxes_;
};

#if 0
class BVHBox {
}

class Scene {
  std::vector<BVHBox> nodes_;
};
#endif

}  // namespace nanort

#ifdef NANORT_IMPLEMENTATION

#include <limits>
#include <cassert>
#include <algorithm>
#include <functional>

//
// SAH functions
//
namespace nanort {

// Per-axis histogram used for SAH binning: for each of the 3 axes it counts,
// per bin, how many triangle bboxes start (min) and end (max) there.
struct BinBuffer {
  BinBuffer(int size) {
    binSize = size;
    bin.resize(2 * 3 * size);
    clear();
  }

  void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * binSize); }

  std::vector<size_t> bin;  // (min, max) * xyz * binsize
  int binSize;
};

// Surface area of the AABB [min, max] (the SAH metric).
inline float CalculateSurfaceArea(const float3 &min, const float3 &max) {
  float3 box = max - min;
  return 2.0 * (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]);
}

// Computes the AABB of triangle `index` from the indexed face list.
inline void GetBoundingBoxOfTriangle(float3 &bmin, float3 &bmax,
                                     const float *vertices,
                                     const unsigned int *faces,
                                     unsigned int index) {
  unsigned int f0 = faces[3 * index + 0];
  unsigned int f1 = faces[3 * index + 1];
  unsigned int f2 = faces[3 * index + 2];

  float3 p[3];

  p[0] = float3(&vertices[3 * f0]);
  p[1] = float3(&vertices[3 * f1]);
  p[2] = float3(&vertices[3 * f2]);

  bmin = p[0];
  bmax = p[0];

  for (int i = 1; i < 3; i++) {
    bmin[0] = std::min(bmin[0], p[i][0]);
    bmin[1] = std::min(bmin[1], p[i][1]);
    bmin[2] = std::min(bmin[2], p[i][2]);

    bmax[0] = std::max(bmax[0], p[i][0]);
    bmax[1] = std::max(bmax[1], p[i][1]);
    bmax[2] = std::max(bmax[2], p[i][2]);
  }
}

// Fills `bins` with min/max bbox counts for the triangles
// indices[leftIdx..rightIdx).  The bins are cleared first.
void ContributeBinBuffer(BinBuffer *bins,  // [out]
                         const float3 &sceneMin, const float3 &sceneMax,
                         const float *vertices, const unsigned int *faces,
                         unsigned int *indices, unsigned int leftIdx,
                         unsigned int rightIdx, float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;

  // NOTE(review): this local float `binSize` shadows the integer bin count;
  // the size_t-vs-float comparisons below therefore go through implicit
  // float conversion. Works for the default bin counts, but worth confirming.
  float binSize = (float)bins->binSize;

  // Calculate extent
  float3 sceneSize, sceneInvSize;
  sceneSize = sceneMax - sceneMin;
  for (int i = 0; i < 3; ++i) {
    assert(sceneSize[i] >= 0.0);

    if (sceneSize[i] > kEPS) {
      sceneInvSize[i] = binSize / sceneSize[i];
    } else {
      sceneInvSize[i] = 0.0;  // degenerate axis -> everything lands in bin 0
    }
  }

  // Clear bin data
  std::fill(bins->bin.begin(), bins->bin.end(), 0);
  // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->binSize));

  size_t idxBMin[3];
  size_t idxBMax[3];

  for (size_t i = leftIdx; i < rightIdx; i++) {
    //
    // Quantize the position into [0, BIN_SIZE)
    //
    // q[i] = (int)(p[i] - scene_bmin) / scene_size
    //
    float3 bmin;
    float3 bmax;

    GetBoundingBoxOfTriangle(bmin, bmax, vertices, faces, indices[i]);

    float3 quantizedBMin = (bmin - sceneMin) * sceneInvSize;
    float3 quantizedBMax = (bmax - sceneMin) * sceneInvSize;

    // idx is now in [0, BIN_SIZE)
    for (size_t j = 0; j < 3; ++j) {
      int q0 = (int)quantizedBMin[j];
      if (q0 < 0) q0 = 0;
      int q1 = (int)quantizedBMax[j];
      if (q1 < 0) q1 = 0;

      idxBMin[j] = (unsigned int)q0;
      idxBMax[j] = (unsigned int)q1;

      // Clamp to the last bin.
      if (idxBMin[j] >= binSize) idxBMin[j] = binSize - 1;
      if (idxBMax[j] >= binSize) idxBMax[j] = binSize - 1;

      assert(idxBMin[j] < binSize);
      assert(idxBMax[j] < binSize);

      // Increment bin counter
      bins->bin[0 * (bins->binSize * 3) + j * bins->binSize + idxBMin[j]] += 1;
      bins->bin[1 * (bins->binSize * 3) + j * bins->binSize + idxBMax[j]] += 1;
    }
  }
}

// Surface Area Heuristic cost of a split: two AABB tests plus the
// area-weighted triangle-intersection cost on each side.
inline float SAH(size_t ns1, float leftArea, size_t ns2, float rightArea,
                 float invS, float Taabb, float Ttri) {
  // const float Taabb = 0.2f;
  // const float Ttri = 0.8f;
  float T;

  T = 2.0f * Taabb + (leftArea * invS) * (float)(ns1)*Ttri +
      (rightArea * invS) * (float)(ns2)*Ttri;

  return T;
}

// Scans the bin buffer and returns, per axis, the split position with minimum
// SAH cost (cutPos[3]) plus the overall best axis (minCostAxis).
bool FindCutFromBinBuffer(float *cutPos,     // [out] xyz
                          int &minCostAxis,  // [out]
                          const BinBuffer *bins, const float3 &bmin,
                          const float3 &bmax, size_t numTriangles,
                          float costTaabb,  // should be in [0.0, 1.0]
                          float epsScale) {
  const float eps = std::numeric_limits<float>::epsilon() * epsScale;
  size_t left, right;
  float3 bsize, bstep;
  float3 bminLeft, bmaxLeft;
  float3 bminRight, bmaxRight;
  float saLeft, saRight, saTotal;
  float pos;
  float minCost[3];

  float costTtri = 1.0 - costTaabb;

  minCostAxis = 0;

  bsize = bmax - bmin;
  bstep = bsize * (1.0 / bins->binSize);
  saTotal = CalculateSurfaceArea(bmin, bmax);

  float invSaTotal = 0.0;
  if (saTotal > eps) {
    invSaTotal = 1.0 / saTotal;
  }

  for (int j = 0; j < 3; ++j) {
    //
    // Compute SAH cost for right side of each cell of the bbox.
    // Exclude both extreme side of the bbox.
    //
    //  i:      0    1    2    3
    //     +----+----+----+----+----+
    //     |    |    |    |    |    |
    //     +----+----+----+----+----+
    //
    float minCostPos = bmin[j] + 0.5 * bstep[j];
    minCost[j] = std::numeric_limits<float>::max();

    left = 0;
    right = numTriangles;
    bminLeft = bminRight = bmin;
    bmaxLeft = bmaxRight = bmax;

    for (int i = 0; i < bins->binSize - 1; ++i) {
      // Sweep: move this bin's min-count into the left side and remove its
      // max-count from the right side.
      left += bins->bin[0 * (3 * bins->binSize) + j * bins->binSize + i];
      right -= bins->bin[1 * (3 * bins->binSize) + j * bins->binSize + i];

      assert(left <= numTriangles);
      assert(right <= numTriangles);

      //
      // Split pos bmin + (i + 1) * (bsize / BIN_SIZE)
      // +1 for i since we want a position on right side of the cell.
      //
      pos = bmin[j] + (i + 0.5) * bstep[j];
      bmaxLeft[j] = pos;
      bminRight[j] = pos;

      saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft);
      saRight = CalculateSurfaceArea(bminRight, bmaxRight);

      float cost =
          SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri);
      if (cost < minCost[j]) {
        //
        // Update the min cost
        //
        minCost[j] = cost;
        minCostPos = pos;
        // minCostAxis = j;
      }
    }

    cutPos[j] = minCostPos;
  }

  // cutAxis = minCostAxis;
  // cutPos = minCostPos;

  // Find min cost axis
  float cost = minCost[0];
  minCostAxis = 0;
  if (cost > minCost[1]) {
    minCostAxis = 1;
    cost = minCost[1];
  }
  if (cost > minCost[2]) {
    minCostAxis = 2;
    cost = minCost[2];
  }

  return true;
}

// Partition predicate: true when a triangle's centroid (times 3, to avoid a
// division) lies left of the split plane `pos` on axis `axis`.
// NOTE(review): std::unary_function was deprecated in C++11 and removed in
// C++17 — this base will not compile under newer standards; consider dropping
// it when the file's language level is raised.
class SAHPred : public std::unary_function<unsigned int, bool> {
 public:
  SAHPred(int axis, float pos, const float *vertices,
          const unsigned int *faces)
      : axis_(axis), pos_(pos), vertices_(vertices), faces_(faces) {}

  bool operator()(unsigned int i) const {
    int axis = axis_;
    float pos = pos_;

    unsigned int i0 = faces_[3 * i + 0];
    unsigned int i1 = faces_[3 * i + 1];
    unsigned int i2 = faces_[3 * i + 2];

    float3 p0(&vertices_[3 * i0]);
    float3 p1(&vertices_[3 * i1]);
    float3 p2(&vertices_[3 * i2]);

    // Unnormalized centroid (sum of the three vertices) vs pos * 3.
    float center = p0[axis] + p1[axis] + p2[axis];

    return (center < pos * 3.0f);
  }

 private:
  int axis_;
  float pos_;
  const float *vertices_;
  const unsigned int *faces_;
};

#ifdef _OPENMP
// OpenMP variant of ComputeBoundingBox: per-thread local bounds merged under
// a critical section. Parallelism only kicks in for n > 128K faces.
void ComputeBoundingBoxOMP(float3 &bmin, float3 &bmax, const float *vertices,
                           const unsigned int *faces, unsigned int *indices,
                           unsigned int leftIndex, unsigned int rightIndex,
                           float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;

  long long i = leftIndex;
  long long idx = indices[i];
  long long n = rightIndex - leftIndex;

  // Seed the bounds with the first vertex of the first face (padded by kEPS).
  bmin[0] = vertices[3 * faces[3 * idx + 0] + 0] - kEPS;
  bmin[1] = vertices[3 * faces[3 * idx + 0] + 1] - kEPS;
  bmin[2] = vertices[3 * faces[3 * idx + 0] + 2] - kEPS;
  bmax[0] = vertices[3 * faces[3 * idx + 0] + 0] + kEPS;
  bmax[1] = vertices[3 * faces[3 * idx + 0] + 1] + kEPS;
  bmax[2] = vertices[3 * faces[3 * idx + 0] + 2] + kEPS;

  float local_bmin[3] = {bmin[0], bmin[1], bmin[2]};
  float local_bmax[3] = {bmax[0], bmax[1], bmax[2]};

#pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128))
  {
#pragma omp for
    for (i = leftIndex; i < rightIndex; i++) {  // for each faces
      size_t idx = indices[i];

      for (int j = 0; j < 3; j++) {  // for each face vertex
        size_t fid = faces[3 * idx + j];
        for (int k = 0; k < 3; k++) {  // xyz
          float minval = vertices[3 * fid + k] - kEPS;
          float maxval = vertices[3 * fid + k] + kEPS;
          if (local_bmin[k] > minval) local_bmin[k] = minval;
          if (local_bmax[k] < maxval) local_bmax[k] = maxval;
        }
      }
    }

    // Merge this thread's bounds into the shared result.
#pragma omp critical
    {
      for (int k = 0; k < 3; k++) {
        if (local_bmin[k] < bmin[k]) {
          {
            if (local_bmin[k] < bmin[k]) bmin[k] = local_bmin[k];
          }
        }

        if (local_bmax[k] > bmax[k]) {
          {
            if (local_bmax[k] > bmax[k]) bmax[k] = local_bmax[k];
          }
        }
      }
    }
  }
}
#endif

// Computes the kEPS-padded AABB of the faces referenced by
// indices[leftIndex..rightIndex), reading vertex positions directly.
void ComputeBoundingBox(float3 &bmin, float3 &bmax, const float *vertices,
                        const unsigned int *faces, unsigned int *indices,
                        unsigned int leftIndex, unsigned int rightIndex,
                        float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;

  long long i = leftIndex;
  long long idx = indices[i];

  // Seed the bounds with the first vertex of the first face.
  bmin[0] = vertices[3 * faces[3 * idx + 0] + 0] - kEPS;
  bmin[1] = vertices[3 * faces[3 * idx + 0] + 1] - kEPS;
  bmin[2] = vertices[3 * faces[3 * idx + 0] + 2] - kEPS;
  bmax[0] = vertices[3 * faces[3 * idx + 0] + 0] + kEPS;
  bmax[1] = vertices[3 * faces[3 * idx + 0] + 1] + kEPS;
  bmax[2] = vertices[3 * faces[3 * idx + 0] + 2] + kEPS;

  float local_bmin[3] = {bmin[0], bmin[1], bmin[2]};
  float local_bmax[3] = {bmax[0], bmax[1], bmax[2]};

  {
    for (i = leftIndex; i < rightIndex; i++) {  // for each faces
      size_t idx = indices[i];

      for (int j = 0; j < 3; j++) {  // for each face vertex
        size_t fid = faces[3 * idx + j];
        for (int k = 0; k < 3; k++) {  // xyz
          float minval = vertices[3 * fid + k] - kEPS;
          float maxval = vertices[3 * fid + k] + kEPS;
          if (local_bmin[k] > minval) local_bmin[k] = minval;
          if (local_bmax[k] < maxval) local_bmax[k] = maxval;
        }
      }
    }

    for (int k = 0; k < 3; k++) {
      bmin[k] = local_bmin[k];
      bmax[k] = local_bmax[k];
    }
  }
}

// Same as ComputeBoundingBox but merges precomputed per-triangle bboxes
// (the cacheBBox path) instead of touching vertex data.
void GetBoundingBox(float3 &bmin, float3 &bmax, std::vector<BBox> &bboxes,
                    unsigned int *indices, unsigned int leftIndex,
                    unsigned int rightIndex, float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;

  long long i = leftIndex;
  long long idx = indices[i];

  bmin[0] = bboxes[idx].bmin[0] - kEPS;
  bmin[1] = bboxes[idx].bmin[1] - kEPS;
  bmin[2] = bboxes[idx].bmin[2] - kEPS;
  bmax[0] = bboxes[idx].bmax[0] + kEPS;
  bmax[1] = bboxes[idx].bmax[1] + kEPS;
  bmax[2] = bboxes[idx].bmax[2] + kEPS;

  float local_bmin[3] = {bmin[0], bmin[1], bmin[2]};
  float local_bmax[3] = {bmax[0], bmax[1], bmax[2]};

  {
    for (i = leftIndex; i < rightIndex; i++) {  // for each faces
      size_t idx = indices[i];

      for (int k = 0; k < 3; k++) {  // xyz
        float minval = bboxes[idx].bmin[k] - kEPS;
        float maxval = bboxes[idx].bmax[k] + kEPS;
        if (local_bmin[k] > minval) local_bmin[k] = minval;
        if (local_bmax[k] < maxval) local_bmax[k] = maxval;
      }
    }

    for (int k = 0; k < 3; k++) {
      bmin[k] = local_bmin[k];
      bmax[k] = local_bmax[k];
    }
  }
}

//
// --
//

#if 
NANORT_ENABLE_PARALLEL_BUILD unsigned int BVHAccel::BuildShallowTree(std::vector<BVHNode> &outNodes, const float *vertices, const unsigned int *faces, unsigned int leftIdx, unsigned int rightIdx, int depth, int maxShallowDepth, float epsScale) { assert(leftIdx <= rightIdx); unsigned int offset = outNodes.size(); if (stats_.maxTreeDepth < depth) { stats_.maxTreeDepth = depth; } float3 bmin, bmax; ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), leftIdx, rightIdx, epsScale); long long n = rightIdx - leftIdx; if ((n < options_.minLeafPrimitives) || (depth >= options_.maxTreeDepth)) { // Create leaf node. BVHNode leaf; leaf.bmin[0] = bmin[0]; leaf.bmin[1] = bmin[1]; leaf.bmin[2] = bmin[2]; leaf.bmax[0] = bmax[0]; leaf.bmax[1] = bmax[1]; leaf.bmax[2] = bmax[2]; assert(leftIdx < std::numeric_limits<unsigned int>::max()); leaf.flag = 1; // leaf leaf.data[0] = n; leaf.data[1] = (unsigned int)leftIdx; outNodes.push_back(leaf); // atomic update stats_.numLeafNodes++; return offset; } // // Create branch node. // if (depth >= maxShallowDepth) { // Delay to build tree ShallowNodeInfo info; info.leftIdx = leftIdx; info.rightIdx = rightIdx; info.offset = offset; shallowNodeInfos_.push_back(info); // Add dummy node. BVHNode node; node.axis = -1; node.flag = -1; outNodes.push_back(node); return offset; } else { // // Compute SAH and find best split axis and position // int minCutAxis = 0; float cutPos[3] = {0.0, 0.0, 0.0}; BinBuffer bins(options_.binSize); ContributeBinBuffer(&bins, bmin, bmax, vertices, faces, &indices_.at(0), leftIdx, rightIdx, epsScale); FindCutFromBinBuffer(cutPos, minCutAxis, &bins, bmin, bmax, n, options_.costTaabb, epsScale); // Try all 3 axis until good cut position avaiable. unsigned int midIdx; int cutAxis = minCutAxis; for (int axisTry = 0; axisTry < 1; axisTry++) { unsigned int *begin = &indices_[leftIdx]; unsigned int *end = &indices_[rightIdx - 1] + 1; // mimics end() iterator. unsigned int *mid = 0; // try minCutAxis first. 
      cutAxis = (minCutAxis + axisTry) % 3;

      //
      // Split at (cutAxis, cutPos)
      // indices_ will be modified.
      //
      mid = std::partition(begin, end,
                           SAHPred(cutAxis, cutPos[cutAxis], vertices, faces));

      midIdx = leftIdx + (mid - begin);
      if ((midIdx == leftIdx) || (midIdx == rightIdx)) {
        // Can't split well.
        // Switch to object median(which may create unoptimized tree, but
        // stable)
        midIdx = leftIdx + (n >> 1);

        // Try another axis if there's axis to try.

      } else {
        // Found good cut. exit loop.
        break;
      }
    }

    BVHNode node;
    node.axis = cutAxis;
    node.flag = 0;  // 0 = branch
    outNodes.push_back(node);

    // Recursively build both children; their root indices are written into
    // this branch node once both subtrees exist.
    unsigned int leftChildIndex = 0;
    unsigned int rightChildIndex = 0;

    leftChildIndex = BuildShallowTree(outNodes, vertices, faces, leftIdx,
                                      midIdx, depth + 1, maxShallowDepth,
                                      epsScale);

    rightChildIndex = BuildShallowTree(outNodes, vertices, faces, midIdx,
                                       rightIdx, depth + 1, maxShallowDepth,
                                       epsScale);

    if ((leftChildIndex != (unsigned int)(-1)) &&
        (rightChildIndex != (unsigned int)(-1))) {
      outNodes[offset].data[0] = leftChildIndex;
      outNodes[offset].data[1] = rightChildIndex;

      outNodes[offset].bmin[0] = bmin[0];
      outNodes[offset].bmin[1] = bmin[1];
      outNodes[offset].bmin[2] = bmin[2];

      outNodes[offset].bmax[0] = bmax[0];
      outNodes[offset].bmax[1] = bmax[1];
      outNodes[offset].bmax[2] = bmax[2];
    } else {
      // One child failed to build: report and abort.
      if ((leftChildIndex == (unsigned int)(-1)) &&
          (rightChildIndex != (unsigned int)(-1))) {
        fprintf(stderr, "??? : %u, %u\n", leftChildIndex, rightChildIndex);
        exit(-1);
      } else if ((leftChildIndex != (unsigned int)(-1)) &&
                 (rightChildIndex == (unsigned int)(-1))) {
        fprintf(stderr, "??? : %u, %u\n", leftChildIndex, rightChildIndex);
        exit(-1);
      }
    }
  }

  stats_.numBranchNodes++;

  return offset;
}
#endif

// Recursive (single-threaded) BVH build over faces[indices_[leftIdx..rightIdx)].
// Statistics and nodes go to caller-provided outStat/outNodes so this can run
// concurrently on disjoint subtrees.  Returns the subtree root node index.
size_t BVHAccel::BuildTree(BVHBuildStatistics &outStat,
                           std::vector<BVHNode> &outNodes,
                           const float *vertices, const unsigned int *faces,
                           unsigned int leftIdx, unsigned int rightIdx,
                           int depth, float epsScale) {
  assert(leftIdx <= rightIdx);

  size_t offset = outNodes.size();

  if (outStat.maxTreeDepth < depth) {
    outStat.maxTreeDepth = depth;
  }

  float3 bmin, bmax;
  if (!bboxes_.empty()) {
    // Use cached per-face bounding boxes when available.
    GetBoundingBox(bmin, bmax, bboxes_, &indices_.at(0), leftIdx, rightIdx,
                   epsScale);
  } else {
    ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), leftIdx,
                       rightIdx, epsScale);
  }

  long long n = rightIdx - leftIdx;
  if ((n < options_.minLeafPrimitives) || (depth >= options_.maxTreeDepth)) {
    // Create leaf node.
    BVHNode leaf;

    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];

    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];

    assert(leftIdx < std::numeric_limits<unsigned int>::max());
    leaf.flag = 1;  // leaf
    leaf.data[0] = n;
    leaf.data[1] = (unsigned int)leftIdx;

    outNodes.push_back(leaf);  // atomic update

    outStat.numLeafNodes++;

    return offset;
  }

  //
  // Create branch node.
  //

  //
  // Compute SAH and find best split axis and position
  //
  int minCutAxis = 0;
  float cutPos[3] = {0.0, 0.0, 0.0};

  BinBuffer bins(options_.binSize);
  ContributeBinBuffer(&bins, bmin, bmax, vertices, faces, &indices_.at(0),
                      leftIdx, rightIdx, epsScale);
  FindCutFromBinBuffer(cutPos, minCutAxis, &bins, bmin, bmax, n,
                       options_.costTaabb, epsScale);

  // Try all 3 axis until good cut position available.
  // (NOTE(review): the loop bound is 1, so only minCutAxis is tried.)
  unsigned int midIdx;
  int cutAxis = minCutAxis;
  for (int axisTry = 0; axisTry < 1; axisTry++) {
    unsigned int *begin = &indices_[leftIdx];
    unsigned int *end = &indices_[rightIdx - 1] + 1;  // mimics end() iterator.
    unsigned int *mid = 0;

    // try minCutAxis first.
    cutAxis = (minCutAxis + axisTry) % 3;

    //
    // Split at (cutAxis, cutPos)
    // indices_ will be modified.
// mid = std::partition(begin, end, SAHPred(cutAxis, cutPos[cutAxis], vertices, faces)); midIdx = leftIdx + (mid - begin); if ((midIdx == leftIdx) || (midIdx == rightIdx)) { // Can't split well. // Switch to object median(which may create unoptimized tree, but // stable) midIdx = leftIdx + (n >> 1); // Try another axis if there's axis to try. } else { // Found good cut. exit loop. break; } } BVHNode node; node.axis = cutAxis; node.flag = 0; // 0 = branch outNodes.push_back(node); // atomic update unsigned int leftChildIndex = 0; unsigned int rightChildIndex = 0; leftChildIndex = BuildTree(outStat, outNodes, vertices, faces, leftIdx, midIdx, depth + 1, epsScale); rightChildIndex = BuildTree(outStat, outNodes, vertices, faces, midIdx, rightIdx, depth + 1, epsScale); { outNodes[offset].data[0] = leftChildIndex; outNodes[offset].data[1] = rightChildIndex; outNodes[offset].bmin[0] = bmin[0]; outNodes[offset].bmin[1] = bmin[1]; outNodes[offset].bmin[2] = bmin[2]; outNodes[offset].bmax[0] = bmax[0]; outNodes[offset].bmax[1] = bmax[1]; outNodes[offset].bmax[2] = bmax[2]; } outStat.numBranchNodes++; return offset; } bool BVHAccel::Build(const float *vertices, const unsigned int *faces, unsigned int numFaces, const BVHBuildOptions &options) { options_ = options; stats_ = BVHBuildStatistics(); assert(options_.binSize > 1); size_t n = numFaces; // // 1. Create triangle indices(this will be permutated in BuildTree) // indices_.resize(n); #ifdef _OPENMP #pragma omp parallel for #endif for (long long i = 0; i < (long long)n; i++) { indices_[i] = i; } // // 2. Compute bounding box to find scene scale. 
// float epsScale = 1.0f; float3 bmin, bmax; if (options.cacheBBox) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max(); bboxes_.resize(n); for (size_t i = 0; i < n; i++) { // for each faces size_t idx = indices_[i]; BBox bbox; for (int j = 0; j < 3; j++) { // for each face vertex size_t fid = faces[3 * idx + j]; for (int k = 0; k < 3; k++) { // xyz float minval = vertices[3 * fid + k]; float maxval = vertices[3 * fid + k]; if (bbox.bmin[k] > minval) { bbox.bmin[k] = minval; } if (bbox.bmax[k] < maxval) { bbox.bmax[k] = maxval; } } } bboxes_[idx] = bbox; for (int k = 0; k < 3; k++) { // xyz if (bmin[k] > bbox.bmin[k]) { bmin[k] = bbox.bmin[k]; } if (bmax[k] < bbox.bmax[k]) { bmax[k] = bbox.bmax[k]; } } } } else { #ifdef _OPENMP ComputeBoundingBoxOMP(bmin, bmax, vertices, faces, &indices_.at(0), 0, n, epsScale); #else ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), 0, n, epsScale); #endif } // Find max float3 bsize = bmax - bmin; epsScale = std::abs(bsize[0]); if (epsScale < std::abs(bsize[1])) { epsScale = std::abs(bsize[1]); } if (epsScale < std::abs(bsize[2])) { epsScale = std::abs(bsize[2]); } // // 3. Build tree // #ifdef _OPENMP #if NANORT_ENABLE_PARALLEL_BUILD // Do parallel build for enoughly large dataset. 
if (n > options.minPrimitivesForParallelBuild) { BuildShallowTree(nodes_, vertices, faces, 0, n, /* root depth */ 0, options.shallowDepth, epsScale); // [0, n) assert(shallowNodeInfos_.size() > 0); // Build deeper tree in parallel std::vector<std::vector<BVHNode> > local_nodes(shallowNodeInfos_.size()); std::vector<BVHBuildStatistics> local_stats(shallowNodeInfos_.size()); #pragma omp parallel for for (int i = 0; i < (int)shallowNodeInfos_.size(); i++) { unsigned int leftIdx = shallowNodeInfos_[i].leftIdx; unsigned int rightIdx = shallowNodeInfos_[i].rightIdx; BuildTree(local_stats[i], local_nodes[i], vertices, faces, leftIdx, rightIdx, options.shallowDepth, epsScale); } // Join local nodes for (int i = 0; i < (int)local_nodes.size(); i++) { assert(!local_nodes[i].empty()); size_t offset = nodes_.size(); // Add offset to child index(for branch node). for (size_t j = 0; j < local_nodes[i].size(); j++) { if (local_nodes[i][j].flag == 0) { // branch local_nodes[i][j].data[0] += offset - 1; local_nodes[i][j].data[1] += offset - 1; } } // replace nodes_[shallowNodeInfos_[i].offset] = local_nodes[i][0]; // Skip root element of the local node. 
      nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1,
                    local_nodes[i].end());
    }

    // Join statistics
    for (int i = 0; i < (int)local_nodes.size(); i++) {
      stats_.maxTreeDepth =
          std::max(stats_.maxTreeDepth, local_stats[i].maxTreeDepth);
      stats_.numLeafNodes += local_stats[i].numLeafNodes;
      stats_.numBranchNodes += local_stats[i].numBranchNodes;
    }

  } else {
    // Dataset too small for the parallel path; build serially.
    BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
              epsScale);  // [0, n)
  }
#else  // !NANORT_ENABLE_PARALLEL_BUILD
  {
    BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
              epsScale);  // [0, n)
  }
#endif
#else  // !_OPENMP
  {
    BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
              epsScale);  // [0, n)
  }
#endif

  stats_.epsScale = epsScale;
  epsScale_ = epsScale;

  return true;
}

// Serialize nodes_ and indices_ to `filename` in a simple binary layout:
// [numNodes][nodes][numIndices][indices].  Returns false when the file
// cannot be opened for writing.
bool BVHAccel::Dump(const char *filename) {
  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename);
    return false;
  }

  unsigned long long numNodes = nodes_.size();
  assert(nodes_.size() > 0);

  unsigned long long numIndices = indices_.size();

  size_t r = 0;
  r = fwrite(&numNodes, sizeof(unsigned long long), 1, fp);
  assert(r == 1);

  r = fwrite(&nodes_.at(0), sizeof(BVHNode), numNodes, fp);
  assert(r == numNodes);

  r = fwrite(&numIndices, sizeof(unsigned long long), 1, fp);
  assert(r == 1);

  r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  fclose(fp);

  return true;
}

// Load a BVH previously written by Dump().  Returns false when the file
// cannot be opened for reading.
bool BVHAccel::Load(const char *filename) {
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    fprintf(stderr, "Cannot open file: %s\n", filename);
    return false;
  }

  unsigned long long numNodes;
  unsigned long long numIndices;

  size_t r = 0;
  r = fread(&numNodes, sizeof(unsigned long long), 1, fp);
  assert(r == 1);
  assert(numNodes > 0);

  nodes_.resize(numNodes);
  r = fread(&nodes_.at(0), sizeof(BVHNode), numNodes, fp);
  assert(r == numNodes);

  r = fread(&numIndices, sizeof(unsigned long long), 1, fp);
  assert(r == 1);

  indices_.resize(numIndices);
  r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp);
  assert(r == numIndices);

  fclose(fp);

  return true;
}

namespace {

const int kMaxStackDepth = 512;

// Slab test between a ray and an AABB.  The precomputed per-axis direction
// sign selects near/far planes; rayInvDir is the precomputed 1/dir.
inline bool IntersectRayAABB(float &tminOut,  // [out]
                             float &tmaxOut,  // [out]
                             float maxT, float bmin[3], float bmax[3],
                             float3 rayOrg, float3 rayInvDir,
                             int rayDirSign[3]) {
  float tmin, tmax;

  const float min_x = rayDirSign[0] ? bmax[0] : bmin[0];
  const float min_y = rayDirSign[1] ? bmax[1] : bmin[1];
  const float min_z = rayDirSign[2] ? bmax[2] : bmin[2];
  const float max_x = rayDirSign[0] ? bmin[0] : bmax[0];
  const float max_y = rayDirSign[1] ? bmin[1] : bmax[1];
  const float max_z = rayDirSign[2] ? bmin[2] : bmax[2];

  // X
  const float tmin_x = (min_x - rayOrg[0]) * rayInvDir[0];
  const float tmax_x = (max_x - rayOrg[0]) * rayInvDir[0];

  // Y
  const float tmin_y = (min_y - rayOrg[1]) * rayInvDir[1];
  const float tmax_y = (max_y - rayOrg[1]) * rayInvDir[1];

  tmin = (tmin_x > tmin_y) ? tmin_x : tmin_y;
  tmax = (tmax_x < tmax_y) ? tmax_x : tmax_y;

  // Z
  const float tmin_z = (min_z - rayOrg[2]) * rayInvDir[2];
  const float tmax_z = (max_z - rayOrg[2]) * rayInvDir[2];

  tmin = (tmin > tmin_z) ? tmin : tmin_z;
  tmax = (tmax < tmax_z) ? tmax : tmax_z;

  //
  // Hit include (tmin == tmax) edge case(hit 2D plane).
  //
  if ((tmax > 0.0) && (tmin <= tmax) && (tmin <= maxT)) {
    tminOut = tmin;
    tmaxOut = tmax;

    return true;
  }

  return false;  // no hit
}

// Ray/triangle intersection.  On a hit closer than the incoming tInOut, the
// hit distance and barycentric (u, v) are written back and true is returned.
// Near-degenerate triangles (|det| < kEPS) are rejected without culling.
inline bool TriangleIsect(float &tInOut, float &uOut, float &vOut,
                          const float3 &v0, const float3 &v1,
                          const float3 &v2, const float3 &rayOrg,
                          const float3 &rayDir, float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;

  float3 p0(v0[0], v0[1], v0[2]);
  float3 p1(v1[0], v1[1], v1[2]);
  float3 p2(v2[0], v2[1], v2[2]);
  float3 e1, e2;
  float3 p, s, q;

  e1 = p1 - p0;
  e2 = p2 - p0;

  p = vcross(rayDir, e2);

  float invDet;
  float det = vdot(e1, p);

  if (std::abs(det) < kEPS) {  // no-cull
    return false;
  }

  invDet = 1.0 / det;

  s = rayOrg - p0;
  q = vcross(s, e1);

  float u = vdot(s, p) * invDet;
  float v = vdot(q, rayDir) * invDet;
  float t = vdot(e2, q) * invDet;

  if (u < 0.0 || u > 1.0) return false;
  if (v < 0.0 || u + v > 1.0) return false;
  if (t < 0.0 || t > tInOut) return false;

  tInOut = t;
  uOut = u;
  vOut = v;

  return true;
}

// Intersect the ray against every triangle of a leaf node, keeping the
// nearest hit in `isect`.  Faces outside traceOptions.faceIdsRange are
// skipped.  Returns true if any triangle improved the hit distance.
inline bool TestLeafNode(Intersection &isect,  // [inout]
                         const BVHNode &node,
                         const std::vector<unsigned int> &indices,
                         const float *vertices, const unsigned int *faces,
                         const Ray &ray, float epsScale,
                         const BVHTraceOptions& traceOptions) {
  bool hit = false;

  unsigned int numTriangles = node.data[0];
  unsigned int offset = node.data[1];

  float t = isect.t;  // current hit distance

  float3 rayOrg;
  rayOrg[0] = ray.org[0];
  rayOrg[1] = ray.org[1];
  rayOrg[2] = ray.org[2];

  float3 rayDir;
  rayDir[0] = ray.dir[0];
  rayDir[1] = ray.dir[1];
  rayDir[2] = ray.dir[2];

  for (unsigned int i = 0; i < numTriangles; i++) {
    int faceIdx = indices[i + offset];

    if ((faceIdx < traceOptions.faceIdsRange[0]) ||
        (faceIdx >= traceOptions.faceIdsRange[1])) {
      continue;
    }

    int f0 = faces[3 * faceIdx + 0];
    int f1 = faces[3 * faceIdx + 1];
    int f2 = faces[3 * faceIdx + 2];

    float3 v0, v1, v2;
    v0[0] = vertices[3 * f0 + 0];
    v0[1] = vertices[3 * f0 + 1];
    v0[2] = vertices[3 * f0 + 2];

    v1[0] = vertices[3 * f1 + 0];
    v1[1] = vertices[3 * f1 + 1];
    v1[2] = vertices[3 * f1 + 2];

    v2[0] = vertices[3 * f2 + 0];
    v2[1] = vertices[3 * f2 + 1];
    v2[2] = vertices[3 * f2 + 2];

    float u, v;
    if (TriangleIsect(t, u, v, v0, v1, v2, rayOrg, rayDir, epsScale)) {
      // Update isect state
      isect.t = t;
      isect.u = u;
      isect.v = v;
      isect.faceID = faceIdx;
      hit = true;
    }
  }

  return hit;
}

// Leaf test for multi-hit traversal: maintains in `isects` a priority queue
// (furthest hit on top) of up to maxIntersections nearest hits.
inline bool MultiHitTestLeafNode(IsectVector &isects,  // [inout]
                                 int maxIntersections,
                                 const BVHNode &node,
                                 const std::vector<unsigned int> &indices,
                                 const float *vertices,
                                 const unsigned int *faces, const Ray &ray,
                                 float epsScale) {
  bool hit = false;

  unsigned int numTriangles = node.data[0];
  unsigned int offset = node.data[1];

  float t = std::numeric_limits<float>::max();
  if (isects.size() >= (size_t)maxIntersections) {
    t = isects.top().t;  // current furthest hit distance
  }

  float3 rayOrg;
  rayOrg[0] = ray.org[0];
  rayOrg[1] = ray.org[1];
  rayOrg[2] = ray.org[2];

  float3 rayDir;
  rayDir[0] = ray.dir[0];
  rayDir[1] = ray.dir[1];
  rayDir[2] = ray.dir[2];

  for (unsigned int i = 0; i < numTriangles; i++) {
    int faceIdx = indices[i + offset];

    int f0 = faces[3 * faceIdx + 0];
    int f1 = faces[3 * faceIdx + 1];
    int f2 = faces[3 * faceIdx + 2];

    float3 v0, v1, v2;
    v0[0] = vertices[3 * f0 + 0];
    v0[1] = vertices[3 * f0 + 1];
    v0[2] = vertices[3 * f0 + 2];

    v1[0] = vertices[3 * f1 + 0];
    v1[1] = vertices[3 * f1 + 1];
    v1[2] = vertices[3 * f1 + 2];

    v2[0] = vertices[3 * f2 + 0];
    v2[1] = vertices[3 * f2 + 1];
    v2[2] = vertices[3 * f2 + 2];

    float u, v;
    if (TriangleIsect(t, u, v, v0, v1, v2, rayOrg, rayDir, epsScale)) {
      // Update isect state
      if (isects.size() < (size_t)maxIntersections) {
        // Queue not yet full: always keep the hit.
        Intersection isect;
        isect.t = t;
        isect.u = u;
        isect.v = v;
        isect.faceID = faceIdx;

        isects.push(isect);

        // Update furthest distance to far.
        t = std::numeric_limits<float>::max();

        hit = true;
      } else {
        if (t < isects.top().t) {
          // delete furthest intersection and add new intersection.
isects.pop(); Intersection isect; isect.t = t; isect.u = u; isect.v = v; isect.faceID = faceIdx; isects.push(isect); // Update furthest hit distance t = isects.top().t; hit = true; } } } } return hit; } } // namespace bool BVHAccel::Traverse(Intersection &isect, const float *vertices, const unsigned int *faces, const Ray &ray, const BVHTraceOptions& options) { float hitT = std::numeric_limits<float>::max(); // far = no hit. int nodeStackIndex = 0; int nodeStack[512]; nodeStack[0] = 0; // Init isect info as no hit isect.t = hitT; isect.u = 0.0; isect.v = 0.0; isect.faceID = -1; int dirSign[3]; dirSign[0] = ray.dir[0] < 0.0 ? 1 : 0; dirSign[1] = ray.dir[1] < 0.0 ? 1 : 0; dirSign[2] = ray.dir[2] < 0.0 ? 1 : 0; // @fixme { Check edge case; i.e., 1/0 } float3 rayInvDir; rayInvDir[0] = 1.0 / ray.dir[0]; rayInvDir[1] = 1.0 / ray.dir[1]; rayInvDir[2] = 1.0 / ray.dir[2]; float3 rayOrg; rayOrg[0] = ray.org[0]; rayOrg[1] = ray.org[1]; rayOrg[2] = ray.org[2]; float minT, maxT; while (nodeStackIndex >= 0) { int index = nodeStack[nodeStackIndex]; BVHNode &node = nodes_[index]; nodeStackIndex--; bool hit = IntersectRayAABB(minT, maxT, hitT, node.bmin, node.bmax, rayOrg, rayInvDir, dirSign); if (node.flag == 0) { // branch node if (hit) { int orderNear = dirSign[node.axis]; int orderFar = 1 - orderNear; // Traverse near first. nodeStack[++nodeStackIndex] = node.data[orderFar]; nodeStack[++nodeStackIndex] = node.data[orderNear]; } } else { // leaf node if (hit) { if (TestLeafNode(isect, node, indices_, vertices, faces, ray, epsScale_, options)) { hitT = isect.t; } } } } assert(nodeStackIndex < kMaxStackDepth); if (isect.t < std::numeric_limits<float>::max()) { return true; } return false; } bool BVHAccel::MultiHitTraverse(StackVector<Intersection, 128> &isects, int maxIntersections, const float *vertices, const unsigned int *faces, Ray &ray) { float hitT = std::numeric_limits<float>::max(); // far = no hit. 
int nodeStackIndex = 0; int nodeStack[512]; nodeStack[0] = 0; IsectVector isectPQ; isects->clear(); int dirSign[3]; dirSign[0] = ray.dir[0] < 0.0 ? 1 : 0; dirSign[1] = ray.dir[1] < 0.0 ? 1 : 0; dirSign[2] = ray.dir[2] < 0.0 ? 1 : 0; // @fixme { Check edge case; i.e., 1/0 } float3 rayInvDir; rayInvDir[0] = 1.0 / ray.dir[0]; rayInvDir[1] = 1.0 / ray.dir[1]; rayInvDir[2] = 1.0 / ray.dir[2]; float3 rayOrg; rayOrg[0] = ray.org[0]; rayOrg[1] = ray.org[1]; rayOrg[2] = ray.org[2]; float minT, maxT; while (nodeStackIndex >= 0) { int index = nodeStack[nodeStackIndex]; BVHNode &node = nodes_[index]; nodeStackIndex--; bool hit = IntersectRayAABB(minT, maxT, hitT, node.bmin, node.bmax, rayOrg, rayInvDir, dirSign); if (node.flag == 0) { // branch node if (hit) { int orderNear = dirSign[node.axis]; int orderFar = 1 - orderNear; // Traverse near first. nodeStack[++nodeStackIndex] = node.data[orderFar]; nodeStack[++nodeStackIndex] = node.data[orderNear]; } } else { // leaf node if (hit) { if (MultiHitTestLeafNode(isectPQ, maxIntersections, node, indices_, vertices, faces, ray, epsScale_)) { // Only update `hitT` when queue is full. if (isectPQ.size() >= (size_t)maxIntersections) { hitT = isectPQ.top().t; } } } } } assert(nodeStackIndex < kMaxStackDepth); if (!isectPQ.empty()) { // Store intesection in reverse order(make it frontmost order) size_t n = isectPQ.size(); isects->resize(n); for (size_t i = 0; i < n; i++) { const Intersection &isect = isectPQ.top(); isects[n - i - 1] = isect; isectPQ.pop(); } return true; } return false; } } // namespace #endif #endif // __NANORT_H__
GB_unop__identity_uint8_int8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint8_int8)
// op(A') function:  GB (_unop_tran__identity_uint8_int8)

// C type:   uint8_t
// A type:   int8_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int8_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ;     \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint8_int8)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel, specialized by the macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
guess.c
#include <stdint.h> #include <omp.h> #include "geometry.h" #include "mesh2geo.h" #include "phy.h" /* Set the initial guess */ void iguess(struct geometry *g) { uint32_t i; #pragma omp parallel for for(i = 0; i < g->n->sz; i++) { g->q->q[i * g->c->b + 0] = P; g->q->q[i * g->c->b + 1] = U; g->q->q[i * g->c->b + 2] = V; g->q->q[i * g->c->b + 3] = W; } }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_unaryop__abs_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_fp32_fp32 // op(A') function: GB_tran__abs_fp32_fp32 // C type: float // A type: float // cast: float cij = (float) aij // unaryop: cij = fabsf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = fabsf (x) ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_fp32_fp32 ( float *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
// ==== file: operator_tune-inl.h ====
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef MXNET_OPERATOR_OPERATOR_TUNE_INL_H_ #define MXNET_OPERATOR_OPERATOR_TUNE_INL_H_ #include <dmlc/base.h> #include <dmlc/logging.h> #include <mshadow/base.h> #include <atomic> #include <cstdint> #include <chrono> #include <thread> #include <string> #include <vector> #include <algorithm> #include <list> #include <random> #include <unordered_set> #include "./mxnet_op.h" #include "./operator_tune.h" #if (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && !defined(__mips__) # define HAS_CXA_DEMANGLE 1 #else # define HAS_CXA_DEMANGLE 0 #endif #if HAS_CXA_DEMANGLE #include <cxxabi.h> #endif namespace mxnet { namespace op { #ifndef MXNET_NO_INLINE #ifdef _MSC_VER #define MXNET_NO_INLINE __declspec(noinline) #else #define MXNET_NO_INLINE __attribute__((noinline)) #endif #endif // MXNET_NO_INLINE #define OUTSIDE_COUNT_SHIFT 9 namespace tune { /*! 
* \brief Convert TuningMode value to a string representation * \param tm Scalar TuningMode value * \return Character pointer to a string representing the TuningMode value */ inline const char *TuningModeToString(const TuningMode tm) { switch (tm) { case kAuto: return "Auto"; case kNeverOMP: return "NeverOMP"; case kAlwaysOMP: return "AlwaysOMP"; default: CHECK(false) << "Unknown TuningMode type: " << static_cast<int>(tm); return "<unknown>"; } } } // namespace tune /*! * \brief Engine to tune kernel operations * \tparam DType Data type to be used when tuning the kernel operations * \remarks The basic concept here is that we time how long a trivial loop takes with and without * OMP, subtracting the non-OMP run from the OMP run, which gives us the time * that the OMP overhead takes. Times were found to be relatively invariant with * regard ot the number of threads/cores on a given machine. * Secondly, supplied operators are run and timed (for each data type) in order to determine * their individual time cost. * * Knowing the following items, we can determine how long the OMP and non-OMP run * is expected to take: * 1) OMP overhead time * 2) Number of iterations required * 3) Number of threads to be used if we choose the OMP method * 4) The data type * * Therefore, at Kernel::Launch() time, we can estimate whether it is faster to use OMP or not * for the given kernel operator. * * Results and efficiency of the tuning is tested in the gtest OMP_TUNING test suite */ template<typename DType> class OperatorTune : public OperatorTuneByType<DType> { public: using Tick = OperatorTuneBase::Tick; using duration_t = OperatorTuneBase::duration_t; using OperatorTuneByType<DType>::tuning_mode_; /*! * \brief Constructor */ OperatorTune() { TuneAll(); } /*! 
* \brief Initialize the OperatorTune object * \return Whether the OperatorTune object was successfully initialized */ static bool Initialize() { if (!initialized_) { initialized_ = true; // Generate some random data for calling the operator kernels data_set_.reserve(0x100); std::random_device rd; std::mt19937 gen(rd()); if (!std::is_integral<DType>::value) { std::uniform_real_distribution<> dis(-1, 1); for (int n = 0; n < 0x100; ++n) { const auto val = static_cast<DType>(dis(gen)); // If too close to zero, try again if (std::fabs(static_cast<double>(val)) < 1e-5) { --n; continue; } data_set_.emplace_back(val); } } else { std::uniform_int_distribution<> dis(-128, 127); for (int n = 0; n < 0x100; ++n) { const auto val = static_cast<DType>(dis(gen)); // If zero, try again if (!val) { --n; continue; } data_set_.emplace_back(val); } } // Use this environment variable to generate new tuning statistics // In order to avoid printing too many copies, only the float32 object prints output_tuning_data_ = mshadow::DataType<DType>::kFlag == mshadow::kFloat32 && dmlc::GetEnv("MXNET_OUTPUT_TUNING_DATA", false); // If outputting tuning data, then also output verbose logging info OperatorTuneBase::verbose_tuning_info_ = dmlc::GetEnv("MXNET_VERBOSE_TUNING_INFO", false); OperatorTuneBase::tuning_weight_scale_ = dmlc::GetEnv("MXNET_TUNING_WEIGHT_SCALE", 0.0); // This isn't actually supposed to be multithreaded init, but just to be sure the change is // seen everywhere, using atomic bool. 
if (!OperatorTuneBase::calculated_.load()) { // Not especially concerned with a race condition, since this hsould // run when only one thread is active (static init), just don't cache this variable OperatorTuneBase::calculated_.store(true); OperatorTuneBase::omp_overhead_ns_ = GetOMPLoopOverhead(); std::string config = dmlc::GetEnv("MXNET_USE_OPERATOR_TUNING", std::string()); ParseEnablerConfig(config); } if (OperatorTuneBase::verbose_tuning_info_) { LOG(INFO) << "OMP overhead: " << OperatorTuneBase::omp_overhead_ns_ << " nanoseconds"; } } return true; } /*! * \brief Schedule a tuning run * \tparam OP Operator to tune * \param tune_func Function to call which tunes the operator * \return true if the tune operation was scheduled */ template<typename OP> static bool ScheduleTune(void (*tune_func)()) { #ifdef MXNET_USE_OPERATOR_TUNING if (tune_func) { GetTuningList()->push_back(tune_func); operator_names_.insert(demangle(typeid(OP).name())); return true; } return false; #else return true; #endif } /*! * \brief Is the template parameter type a tuned kernel? 
* \tparam OP kernel operator type * \return true if the operator/kernel is tuned */ template<typename OP> static bool IsTuned() { return operator_names_.find(demangle(typeid(OP).name())) != operator_names_.end(); } /*!\ * \brief Tune all registered kernel operators that haven't already been tuned */ static bool TuneAll() { Initialize(); std::list<void (*)()> *tl = GetTuningList(); const size_t size_save = tl->size(); // For checking if anything asynchronous is // adding or removing items, which is forbidden if (output_tuning_data_ && !tl->empty()) { // Only emit this once, use the most common case, 'float32' if (mshadow::DataType<DType>::kFlag == mshadow::kFloat32) { std::cout << "OperatorTuneBase::duration_t " << "OperatorTuneBase::omp_overhead_ns_ = " << OperatorTuneBase::omp_overhead_ns_ << ";" << std::endl << std::flush; } } const Tick start = std::chrono::high_resolution_clock::now(); for (auto i : *tl) { (*i)(); } if (OperatorTuneBase::verbose_tuning_info_) { const duration_t duration = OperatorTune::GetDurationInNanoseconds(start); LOG(INFO) << "Op Tuning for " << type_name<DType>() << " took " << (duration / 1000000) << " ms"; } CHECK_EQ(size_save, tl->size()) << "Tuning list size should not have changed while tuning"; tl->clear(); return true; } /*! * \brief Return set of operator names that were registered to be tuned. Does not imply * that the operator has been tuned. * \return Set of operator/kernel names that were registered for tuning */ static const std::unordered_set<std::string>& TunedOperatorNames() { return operator_names_; } protected: /*! * \brief Get the list of tuning function calls for the operators * \return Pointer to list of tuning function calls */ static std::list<void (*)()> *GetTuningList(); /*! 
* \brief Demangle typeid::name() in order to generate source macros * \param name C++ Mangled name * \return Demangled name as string */ static inline std::string demangle(const char *name) { #if HAS_CXA_DEMANGLE int status = -4; // some arbitrary value to eliminate the compiler warning std::unique_ptr<char, void (*)(void *)> res{ abi::__cxa_demangle(name, nullptr, nullptr, &status), &std::free }; return status ? name : res.get(); #else return name; #endif } /*! * \brief Type name as string * \tparam T Type * \return std::string representing the human-readable demangled type name */ template<typename T> static inline std::string type_name() { return demangle(typeid(T).name()); } /*! \brief Measure OMP overhead for a trivial OMP loop using all cores * \param omp_thread_count - Number of OMP threads to use in the timing test * \returns Duration in nanoseconds for the OMP overhead (time to initiate and close the * OMP session) */ static duration_t GetOMPLoopOverhead(const size_t omp_thread_count) { CHECK_GT(omp_thread_count, 1); // Don't try to use OMP for one thread int wl_count = OperatorTuneBase::WORKLOAD_COUNT; Tick start = std::chrono::high_resolution_clock::now(); // Use two loops in order to simulate OMP outside timing for (size_t i = 0; i < OUTSIDE_COUNT; ++i) { for (int x = 0; x < wl_count; ++x) { // trivial operation volatile_int_ += x; } } const OperatorTuneBase::duration_t no_omp_duration = OperatorTuneBase::GetDurationInNanoseconds(start); // Scale OMP iterations by type calculation complexity double factor; // if tuning_weight_scale_ is a number that looks valid, use it as the factor if (OperatorTuneBase::tuning_weight_scale_ > 0.01) { factor = OperatorTuneBase::tuning_weight_scale_; } else { // These are empirically-determined constants found by balancing between // a desktop (8 & 12 cpu's) and large cloud instances (32 & 64 cpu's) switch (mshadow::DataType<DType>::kFlag) { case mshadow::kUint8: case mshadow::kInt8: factor = 8.5; break; case 
mshadow::kInt32: factor = 4.5; break; case mshadow::kInt64: factor = 2; break; case mshadow::kFloat64: factor = 1.25; break; case mshadow::kFloat32: default: factor = 1.0; break; } } wl_count = static_cast<int>(factor * OperatorTuneBase::WORKLOAD_COUNT * omp_thread_count); start = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < OUTSIDE_COUNT; ++i) { #pragma omp parallel for num_threads(omp_thread_count) for (int x = 0; x < wl_count; ++x) { // trivial operation volatile_int_ += x; } } const duration_t omp_duration = OperatorTuneBase::GetDurationInNanoseconds(start) - no_omp_duration; return omp_duration >> OUTSIDE_COUNT_SHIFT; } /*! \brief Measure OMP overhead for a trivial OMP loop using all cores * \returns Time in nanoseconds to initialize/cleanup when excuting an OMP block */ static duration_t GetOMPLoopOverhead() { // It was found empirically that OMP times was not heavily tied to number of cores, // so take an average across all core counts const auto max_cores = static_cast<size_t>(omp_get_num_procs()) >> 1; if (max_cores >= 2) { std::vector<duration_t> core_times; // Take care of any OMP lazy-init with a throwaway call for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) { GetOMPLoopOverhead(omp_threads); } std::vector<duration_t> durations; durations.reserve(max_cores - 1); for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) { const duration_t duration = GetOMPLoopOverhead(omp_threads); if (OperatorTuneBase::verbose_tuning_info_) { LOG(INFO) << "OMP Thread Count: " << omp_threads << ", overhead: " << duration << " ns"; } durations.emplace_back(duration); } // return median std::sort(durations.begin(), durations.end()); return durations[durations.size() >> 1]; } return INT_MAX; // If only one core, then never use OMP (say the overhead is huge) } /*! * \brief Some string utility functions that aren't specific to tuning */ struct StringUtil { /*! 
* \brief Terim whitespace from beninning and end of string * \param s String to trimp * \return reference to the modified string. This is the same std::string object as what was * supplied in the parameters */ static std::string &trim(std::string *s) { s->erase(s->begin(), std::find_if(s->begin(), s->end(), [](int ch) { return !std::isspace(ch); })); s->erase(std::find_if(s->rbegin(), s->rend(), [](int ch) { return !std::isspace(ch); }).base(), s->end()); return *s; } /*! * \brief Tokenize a string into a list of tokens * \param s String to tokenize * \return std::list of tokens */ static std::list<std::string> string2list(const std::string &s) { std::list<std::string> res; std::istringstream iss(s); std::string token; while (std::getline(iss, token, ',')) { trim(&token); if (!token.empty()) { res.push_back(token); } } return res; } }; /*! * \brief Get data type from string representation * \warning Do not call from a performance-sensitive area */ static int type_from_string(const std::string& type_string) { if (type_string == "float32") return mshadow::kFloat32; if (type_string == "float64") return mshadow::kFloat64; if (type_string == "float16") return mshadow::kFloat16; if (type_string == "int8") return mshadow::kInt8; if (type_string == "uint8") return mshadow::kUint8; if (type_string == "int32") return mshadow::kInt32; if (type_string == "int64") return mshadow::kInt64; return -1; // invalid } /*! 
* \brief Parse MXNET_ENABLE_OPERATOR_TUNING environment variable * \param config String representation of MXNET_ENABLE_OPERATOR_TUNING environment variable * Values: * 0=disable all * 1=enable all * float32, float16, float32=list of types to enable, and disable those not listed */ static void ParseEnablerConfig(std::string config) { StringUtil::trim(&config); if (!config.empty()) { // First disable all OperatorTuneByType<float>::set_tuning_mode(tune::kAlwaysOMP); OperatorTuneByType<double>::set_tuning_mode(tune::kAlwaysOMP); OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAlwaysOMP); OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAlwaysOMP); OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAlwaysOMP); OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAlwaysOMP); // See if it's a non-number (ie type or list of types) if (!::isdigit(config[0])) { OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto); std::list<std::string> tokens = StringUtil::string2list(config); for (const std::string& stype : tokens) { // We don't have an enum for halt_t const int typ = type_from_string(stype); if (typ >= 0) { switch (typ) { case mshadow::kFloat32: OperatorTuneByType<float>::set_tuning_mode(tune::kAuto); break; case mshadow::kFloat64: OperatorTuneByType<double>::set_tuning_mode(tune::kAuto); break; case mshadow::kFloat16: OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto); break; case mshadow::kInt8: OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto); break; case mshadow::kUint8: OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto); break; case mshadow::kInt32: OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto); break; case mshadow::kInt64: OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto); break; default: CHECK(false) << "Unsupported tuning data type: " << stype; break; } } else { // -1 is error LOG(WARNING) << "Unknown data type to be tuned: " << stype; } } } else { if (std::atoi(config.c_str()) > 0) { 
OperatorTuneByType<float>::set_tuning_mode(tune::kAuto); OperatorTuneByType<double>::set_tuning_mode(tune::kAuto); OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto); OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto); OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto); OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto); OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto); } } } } /*! \brief Whether this object has been initialized */ static bool initialized_; /*! \brief Number of passes to obtain an average */ static constexpr duration_t OUTSIDE_COUNT = (1 << OUTSIDE_COUNT_SHIFT); /*! \brief Random data for timing operator calls */ static std::vector<DType> data_set_; /*! \brief Operators tuned */ static std::unordered_set<std::string> operator_names_; /*! \brief Arbitary object to modify in OMP loop */ static volatile int volatile_int_; /*! \brief Output insertable (into code) instantiation+default-value macros */ static bool output_tuning_data_; }; /*! * \brief Class that tunes unary operators * \tparam DType Data type to be used when tuning the kernel operations */ template<typename DType> class UnaryOpTune : public OperatorTune<DType> { protected: typedef OperatorTune<DType> Super; using duration_t = typename Super::duration_t; using Tick = typename Super::Tick; /*! 
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations * Used for kernels that take no arguments (ie set_zero) * \tparam OP Kernel operator * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations */ template<typename OP> static duration_t GetBlankWorkload() { DType tmp; volatile DType *res = &tmp; const Tick start = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) { // Use a logical AND instead of mod to avoid affecting the timing result with a slow divide *res += OP::Map(); } const duration_t omp_duration = Super::GetDurationInNanoseconds(start); return omp_duration ? omp_duration : 1; } /*! * \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations * Used for kernels that take one argument (ie sqrt()) * \tparam OP Kernel operator * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations */ template<typename OP> static duration_t GetUnaryWorkload() { DType tmp; volatile DType *res = &tmp; const Tick start = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) { // Use a logical AND instead of mod to avoid affecting the timing result with a slow divide *res = OP::Map(Super::data_set_[i & 0xFF]); } const duration_t omp_duration = Super::GetDurationInNanoseconds(start); return omp_duration ? omp_duration : 1; } /*! 
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations * Used for kernels that take two arguments (ie elemwise_add()) * \tparam OP Kernel operator * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations */ template<typename OP> static inline duration_t GetBinaryWorkload() { DType tmp; volatile DType *res = &tmp; const Tick start = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) { // Use a logical AND instead of mod to avoid affecting the timing result with a slow divide *res = OP::Map(Super::data_set_[i & 0xFF], Super::data_set_[(i + 1) & 0xFF]); } const duration_t omp_duration = Super::GetDurationInNanoseconds(start); return omp_duration ? omp_duration : 1; } /*! * \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations * Used for kernels that take three arguments (ie backwards_grad<elemwise_add>()) * \tparam OP Kernel operator * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations */ template<typename OP> static duration_t GetTertiaryWorkload() { DType tmp; volatile DType *res = &tmp; const Tick start = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) { // Use a logical AND instead of mod to avoid affecting the timing result with a slow divide *res = OP::Map(Super::data_set_[i & 0xFF], Super::data_set_[(i + 1) & 0xFF], Super::data_set_[i & 0xFF]); } const duration_t omp_duration = Super::GetDurationInNanoseconds(start); return omp_duration ? omp_duration : 1; } /*! 
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations * Used for mxnet-like kernels that take no arguments) * \tparam OP Kernel operator * \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations */ template<typename OP> static duration_t GetBlankWorkloadEx() { std::unique_ptr<DType[]> tmp(new DType[Super::WORKLOAD_COUNT]); DType *tmp_ptr = tmp.get(); const Tick start = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) { OP::Map(i, tmp_ptr); } const duration_t omp_duration = Super::GetDurationInNanoseconds(start); return omp_duration ? omp_duration : 1; } public: /*! * \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the * tuning data variable and the default tuned value * This function tunes an operator which takes no arguments * \tparam OP The kernel operator to be tuned */ template<typename OP> static void TuneBlankOperator() { mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkload<OP>(); if (Super::output_tuning_data_) { std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD(" << Super::template type_name<OP>() << "); // NOLINT()" << std::endl << std::flush; // For long lines } } /*! * \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the * tuning data variable and the default tuned value * This function tunes an operator which takes one argument * \tparam OP The kernel operator to be tuned */ template<typename OP> static void TuneUnaryOperator() { mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetUnaryWorkload<OP>(); if (Super::output_tuning_data_) { std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD(" << Super::template type_name<OP>() << "); // NOLINT()" << std::endl << std::flush; // For long lines } } /*! * \brief Tune the specified kernel operator. 
Optionally print out C++ macro that defines the * tuning data variable and the default tuned value * This function tunes a backward operator which takes one argument * \tparam OP The kernel operator to be tuned */ template<typename OP> static void TuneUnaryBackwardOperator() { mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] = GetBinaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>(); if (Super::output_tuning_data_) { std::cout << "IMPLEMENT_UNARY_WORKLOAD_BWD(" << Super::template type_name<OP>() << "); // NOLINT()" << std::endl << std::flush; // For long lines } } /*! * \brief Tune the specified "mxnet_op-type" kernel operator. * Optionally print out C++ macro that defines the * tuning data variable and the default tuned value * This function tunes an operator which takes no arguments * \tparam OP The kernel operator to be tuned */ template<typename OP> static void TuneBlankOperatorEx() { mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkloadEx<OP>(); if (Super::output_tuning_data_) { std::cout << "IMPLEMENT_BLANK_WORKLOAD_FWD(" << Super::template type_name<OP>() << "); // NOLINT()" << std::endl << std::flush; // For long lines } } /*! * \brief Determine whether to use OMP based upon both timing and configuration using the * given (templated) operator's workload * \tparam OP Operator whose workload to use (tuned_op::workload_[0]) * \param N Number of iterations desired * \param thread_count Number of OMP threads available to perform the iterations * \returns Whether it's faster to use OMP for these iterations */ template<typename OP> inline static bool UseOMP(size_t N, size_t thread_count) { return OperatorTune<DType>::UseOMP(N, thread_count, static_cast<uint64_t>(N) * OP::workload_[0]); } }; /*! 
* \brief Class that tunes binary and unary operators * \tparam DType Data type to be used when tuning the kernel operations */ template<typename DType> class BinaryOpTune : public UnaryOpTune<DType> { protected: typedef UnaryOpTune<DType> Super; public: /*! * \brief Tune a generic binary operator * @tparam OP - Operator type */ template<typename OP> static void TuneBinaryOperator() { mxnet_op::tuned_op<OP, DType>::workload_[0] = Super::template GetBinaryWorkload<OP>(); if (Super::Super::output_tuning_data_) { std::cout << "IMPLEMENT_BINARY_WORKLOAD_FWD(" << Super::template type_name<OP>() << "); // NOLINT()" << std::endl << std::flush; // For long lines } } /*! * \brief Tune binary backward operator * \tparam OP - operator */ template<typename OP> static void TuneBinaryBackwardOperator() { mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] = Super::template GetTertiaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>(); if (Super::Super::output_tuning_data_) { std::cout << "IMPLEMENT_BINARY_WORKLOAD_BWD(" << Super::template type_name<OP>() << "); // NOLINT()" << std::endl << std::flush; // For long lines } } }; #undef OUTSIDE_COUNT_SHIFT #undef WORKLOAD_COUNT_SHIFT } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
// ==== file: FunctionUtil.h ====
// Copyright (c) 2013, Adam Harrison* // http://www.ualberta.ca/~apharris/ // All rights reserved. // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: // -Redistributions of source code must retain the above copyright notice, the footnote below, this list of conditions and the following disclaimer. // -Redistributions in binary form must reproduce the above copyright notice, the footnote below, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. // -Neither the name of the University of Alberta nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // *This work originated as part of a Ph.D. project under the supervision of Dr. Dileepan Joseph at the Electronic Imaging Lab, University of Alberta. 
#ifndef FUNCTION_UTIL_H #define FUNCTION_UTIL_H #define LIBMIA_LOG2E 1.44269504088896340736 #include <chrono> #include <iostream> #include <algorithm> #include <boost/numeric/conversion/converter.hpp> #include "LibMIAUtil.h" #include "LibMIARanges.h" #include "IndexUtil.h" #include "PermuteIterator.h" namespace LibMIA { #define sind(x) (sin(fmod((x),360) * M_PI / 180)) #define cosd(x) (cos(fmod((x),360) * M_PI / 180)) namespace internal{ /** \addtogroup util Utilities * @{ */ //index_order is order going from LHS to in RHS - ie {1 3 2 0} means a(i,j,k,l)=b(l,i,k,j) template<class Derived,class otherDerived, class Op, class index_param_type> typename MIAMergeReturnType<Derived,otherDerived>::type perform_implicit_merge(const MIA<Derived>& a, const MIA<otherDerived>& b,const Op& op,const std::array<index_param_type,internal::order<Derived>::value>& index_order){ typedef typename MIAMergeReturnType<Derived,otherDerived>::type retMIAType; typedef typename internal::index_type<otherDerived>::type b_index_type; retMIAType c(a.dims()); typename MIA<Derived>::accumulator_type dim_accumulator; typename MIA<Derived>::fast_accumulator_type fast_dim_accumulator; typename MIA<Derived>::multiplier_type multiplier; internal::create_shuffle_needs(a.dims(),b.dims(),index_order,dim_accumulator,fast_dim_accumulator,multiplier); if(a.dimensionality()>=PARALLEL_TOL){ #pragma omp parallel for for(b_index_type idx=0;idx<a.dimensionality();++idx) c.atIdx(idx)=op(a.atIdx(idx),a.convert(b.atIdx(internal::reShuffleLinearIndex(idx,multiplier,fast_dim_accumulator,dim_accumulator)))); } else{ for(b_index_type idx=0;idx<a.dimensionality();++idx) c.atIdx(idx)=op(a.atIdx(idx),a.convert(b.atIdx(internal::reShuffleLinearIndex(idx,multiplier,fast_dim_accumulator,dim_accumulator)))); } return c; } template<class Derived,class otherDerived, class Op> typename MIAMergeReturnType<Derived,otherDerived>::type perform_implicit_merge(const MIA<Derived>& a, const MIA<otherDerived>& b,const Op& op){ typedef 
typename MIAMergeReturnType<Derived,otherDerived>::type retMIAType; retMIAType c(a.dims()); if(a.dimensionality()>=PARALLEL_TOL){ #pragma omp parallel for for(size_t idx=0;idx<a.dimensionality();++idx) c.atIdx(idx)=op(a.atIdx(idx),a.convert(b.atIdx(idx))); } else{ #pragma omp parallel for for(size_t idx=0;idx<a.dimensionality();++idx) c.atIdx(idx)=op(a.atIdx(idx),a.convert(b.atIdx(idx))); } return c; // typedef typename internal::function_type<retMIAType>::type function_type; // typedef typename internal::index_type<retMIAType>::type index_type; // // // // // auto _function=[&a,&b,op](index_type idx){ // return op(a.atIdx(idx),a.convert(b.atIdx(idx))); // //return a.atIdx(idx)+b.atIdx(idx); // }; // // // return retMIAType(_function,a.dims()); } //Base Case template<size_t cur_partition,class MIAType2,class index_it,size_t no_con_partition, typename boost::enable_if_c< cur_partition == 0, int >::type = 0 > typename internal::data_type<MIAType2>::type collect_contract_partitions(const MIAType2 & source,typename internal::index_type<MIAType2>::type cur_index,const std::array<size_t, no_con_partition>& contract_ranges,const index_it contract_idx_end,const std::array<int,no_con_partition> & contract_partition) { return source.atIdx(cur_index); } template<size_t cur_partition,class MIAType2,class index_it,size_t no_con_partition, typename boost::disable_if_c< cur_partition == 0, int >::type = 0 > typename internal::data_type<MIAType2>::type collect_contract_partitions(const MIAType2 & source,typename internal::index_type<MIAType2>::type cur_index,const std::array<size_t, no_con_partition>& contract_ranges,const index_it contract_idx_end,const std::array<int,no_con_partition> & contract_partition) { typedef typename internal::data_type<MIAType2>::type data_type; data_type sum=0; for(int j=0;j<(int)contract_ranges[cur_partition-1];++j){ auto j_contract_idx=internal::get_contract_idx(j, contract_idx_end-contract_partition[cur_partition-1],contract_idx_end, source.dims()); 
sum+=collect_contract_partitions<cur_partition-1,MIAType2,index_it,no_con_partition>(source,cur_index+j_contract_idx,contract_ranges,contract_idx_end-contract_partition[cur_partition-1],contract_partition); } return sum; } template<class Operand1, class Operand2, class index_type, size_t degree> index_type performShuffledOperation(Operand1 & restrict_libmia operand1, const Operand2 & restrict_libmia operand2, const std::array<index_type, degree> & dims, const std::array<index_type, degree> & multiplier, size_t curIndex, index_type sourceIdx, index_type destIdx){ if (curIndex == 0){ const index_type end = destIdx + dims[0]; for (; destIdx < end; ++destIdx,sourceIdx+=multiplier[0]){ operand1.atIdx(destIdx) = operand2.atIdx(sourceIdx); } } else{ for (index_type dim_idx = 0; dim_idx < dims[curIndex]; ++dim_idx){ destIdx = performShuffledOperation(operand1, operand2, dims, multiplier, curIndex - 1, sourceIdx, destIdx); sourceIdx += multiplier[curIndex]; } } return destIdx; } template<class Operand1, class Operand2, class index_type, size_t degree> index_type performShuffledOperationParallel(Operand1 & restrict_libmia operand1, const Operand2 & restrict_libmia operand2, const std::array<index_type, degree> & dims, const std::array<index_type, degree> & multiplier, size_t curIndex, index_type sourceIdx, index_type destIdx){ if (curIndex == 0){ const index_type end = destIdx + dims[0]; #pragma omp parallel for for (index_type temp_idx = 0; temp_idx<dims[0]; ++temp_idx){ operand1.atIdx(destIdx + temp_idx) = operand2.atIdx(sourceIdx + temp_idx*multiplier[0]); } } else{ const index_type multiplier_dest = operand1.dimensionality() / dims.back(); #pragma omp parallel for for (index_type dim_idx = 0; dim_idx < dims[curIndex]; ++dim_idx){ performShuffledOperation(operand1, operand2, dims, multiplier, curIndex - 1, sourceIdx + dim_idx*multiplier[curIndex], destIdx + multiplier_dest*dim_idx); //sourceIdx += multiplier[curIndex]; } } return destIdx; } template<typename Derived, 
class idx_typeR, class idx_typeC, class idx_typeT, size_t R, size_t C, size_t T>
// Copies an MIA into a DenseLattice, mapping row_indices/column_indices/
// tab_indices of the MIA onto the lattice's row, column and tab axes.
// The three index arrays must together cover every index of the MIA exactly
// once (enforced by the static_asserts below).
auto latticeCopy(const MIA<Derived> &mia, const std::array<idx_typeR,R> & row_indices, const std::array<idx_typeC,C> & column_indices,const std::array<idx_typeT,T> & tab_indices)
->DenseLattice<typename internal::data_type<Derived>::type>
{
    using namespace std::chrono;
    //typedef std::chrono::duration<float> float_seconds;
    high_resolution_clock::time_point t1, t2;
    //t1 = high_resolution_clock::now();
    //print_array(row_indices,"row_indices");
    //print_array(column_indices,"column_indices");
    //print_array(tab_indices,"tab_indices");
    typedef typename MIA<Derived>::unsigned_index_type unsigned_index_type;
    typedef typename internal::data_type<Derived>::type data_type;
    constexpr auto order= internal::order<Derived>::value ;
    static_assert(internal::check_index_compatibility<unsigned_index_type, idx_typeR>::type::value, "Must use an array convertable to index_type");
    static_assert(internal::check_index_compatibility<unsigned_index_type, idx_typeC>::type::value, "Must use an array convertable to index_type");
    static_assert(internal::check_index_compatibility<unsigned_index_type, idx_typeT>::type::value, "Must use an array convertable to index_type");
    static_assert(R + C + T == order, "Size of all three arrays must equal mOrder"); //statically check number of indices match up
    size_t row_size=1, column_size=1, tab_size=1;
    //std::cout <<"Tab " << tab_indices[0] << " " << tab_indices.size() << "\n";
    //std::cout <<"Dims " << this->m_dims[0] << " " << this->m_dims.size() << "\n";
    // products of the MIA extents selected by each index group
    row_size=internal::dimensionality_from(mia.dims(), row_indices);
    column_size=internal::dimensionality_from(mia.dims(), column_indices);
    tab_size=internal::dimensionality_from(mia.dims(), tab_indices);
    // shuffled_dims: the MIA extents reordered to row, column, tab order
    std::array<unsigned_index_type, R + C + T> shuffled_dims;
    std::array<size_t,R+C+T> index_order;
    concat_arrays(row_indices,column_indices,tab_indices,index_order);
    internal::reorder_from(mia.dims(),index_order,shuffled_dims);
    //std::cout<< "Tab dims " << tab_dims[0] << "\n";
    DenseLattice<data_type> lat(row_size, column_size, tab_size);
    typename MIA<Derived>::accumulator_type dim_accumulator;
    typename MIA<Derived>::fast_accumulator_type fast_dim_accumulator;
    typename MIA<Derived>::multiplier_type multiplier;
    internal::create_shuffle_needs(shuffled_dims,mia.dims(),index_order,dim_accumulator,fast_dim_accumulator,multiplier);
    // perform the shuffled copy, parallelizing only for large MIAs
    if (mia.dimensionality() >= PARALLEL_TOL)
        performShuffledOperationParallel(lat, mia, shuffled_dims, multiplier, size_t(R + C + T - 1), unsigned_index_type(0), unsigned_index_type(0));
    else
        performShuffledOperation(lat, mia, shuffled_dims, multiplier, size_t(R + C + T - 1), unsigned_index_type(0), unsigned_index_type(0));
    /*if(mia.dimensionality()>=PARALLEL_TOL){
        #pragma omp parallel for
        for(index_type idx=0;idx<mia.dimensionality();++idx){
            lat.atIdx(idx)=mia.atIdx(internal::reShuffleLinearIndex(idx,multiplier,fast_dim_accumulator,dim_accumulator));
        }
    }
    else{
        for(index_type idx=0;idx<mia.dimensionality();++idx){
            lat.atIdx(idx)=mia.atIdx(internal::reShuffleLinearIndex(idx,multiplier,fast_dim_accumulator,dim_accumulator));
        }
    }*/
    /*t2 = high_resolution_clock::now();
    std::cout << "\t" << duration_cast<float_seconds>(t2 - t1).count();*/
    return lat;
}

// Lazily multiplies two MIAs without forming an intermediate lattice: the
// result is an implicit MIA whose element function gathers the proper operand
// entries on demand. Inter indices are matched element-wise; L_outer/R_outer
// indices are the free (outer-product) indices of a and b respectively.
template<typename MIA,typename otherMIA, typename array_type,size_t Inter,size_t L_outer,size_t R_outer>
auto implicitNoLatticeMult(const MIA &a,const otherMIA &b,const std::array<array_type,Inter>&l_inter_idx,const std::array<array_type,L_outer>&l_outer_idx,const std::array<array_type,Inter>&r_inter_idx,const std::array<array_type,R_outer>&r_outer_idx)
->typename MIANoLatticeProductReturnType<MIA,otherMIA,L_outer+R_outer+Inter>::type
{
    typedef typename MIANoLatticeProductReturnType<MIA,otherMIA,L_outer+R_outer+Inter>::type RetType;
    typedef typename internal::index_type<RetType>::type index_type;
    typedef typename internal::index_type<MIA>::type a_index_type;
    typedef typename internal::index_type<otherMIA>::type b_index_type;
    typedef typename internal::function_type<RetType>::type function_type;
    static_assert(internal::check_index_compatibility<index_type,array_type>::type::value,"Must use an array convertable to index_type");
    std::array<a_index_type, Inter> l_inter_dims;
    std::array<a_index_type, L_outer> l_outer_dims;
    std::array<b_index_type, Inter> r_inter_dims;
    std::array<b_index_type, R_outer> r_outer_dims;
    //get inter and outer dimensionality and the individual dimensions that make up that number - should default to one if any of the arrays are empty
    size_t l_inter_size=internal::reorder_from(a.dims(), l_inter_idx,l_inter_dims);
    size_t r_inter_size= internal::reorder_from(b.dims(), r_inter_idx,r_inter_dims);
    // element-wise (inter) extents must agree between the two operands
    if(l_inter_size!=r_inter_size || !std::equal(l_inter_dims.begin(),l_inter_dims.end(),r_inter_dims.begin()))
        throw DimensionMismatchException("Element-wise dimensions must match during MIA multiplication");
    internal::reorder_from(a.dims(), l_outer_idx,l_outer_dims);
    internal::reorder_from(b.dims(), r_outer_idx,r_outer_dims);
    // result index order: a's outer indices, then b's outer, then the shared inter indices
    std::array<index_type,L_outer+R_outer+Inter> retDims;
    concat_arrays(l_outer_dims, r_outer_dims,l_inter_dims,retDims);
    RetType c(retDims);
    //create lambda function
    // Given a linear index into the result, split it into full indices, then
    // rebuild the linear index into each operand from that operand's own
    // portion of the full-index vector.
    function_type _function=[&a,&b,&c,l_inter_idx,r_inter_idx,l_outer_idx,r_outer_idx](index_type _index){
        auto full_indices=internal::ind2sub(_index,c.dims());
        a_index_type l_idx(0);
        b_index_type r_idx(0);
        l_idx+=internal::sub2ind(full_indices.begin(),full_indices.begin()+L_outer,l_outer_idx,a.dims());
        l_idx+=internal::sub2ind(full_indices.begin()+L_outer+R_outer,full_indices.end(),l_inter_idx,a.dims());
        r_idx+=internal::sub2ind(full_indices.begin()+L_outer,full_indices.begin()+L_outer+R_outer,r_outer_idx,b.dims());
        r_idx+=internal::sub2ind(full_indices.begin()+L_outer+R_outer,full_indices.end(),r_inter_idx,b.dims());
//        print_array(full_indices,"full_indices");
//        print_array(l_outer_idx,"l_outer_idx");
//        std::cout << "Outer l_idx " << internal::sub2ind(full_indices.begin(),full_indices.begin()+L_outer,l_outer_idx,a.dims()) << std::endl;
//        std::cout << "L_outer " << L_outer << " R_outer " << R_outer << " Inter " << Inter << std::endl;
//        std::cout << "l_idx " << l_idx << " r_idx " << r_idx << std::endl;
        return a.atIdx(l_idx)*b.atIdx(r_idx);
    };
    c.get_function()=_function;
    return c;
}

// Eager counterpart of implicitNoLatticeMult: multiplies two MIAs without an
// intermediate lattice, materializing every element of the result up front.
// Same index-group semantics as implicitNoLatticeMult.
template<typename MIA,typename otherMIA, typename array_type,size_t Inter,size_t L_outer,size_t R_outer>
auto noLatticeMult(const MIA &a,const otherMIA &b,const std::array<array_type,Inter>&l_inter_idx,const std::array<array_type,L_outer>&l_outer_idx,const std::array<array_type,Inter>&r_inter_idx,const std::array<array_type,R_outer>&r_outer_idx)
->typename MIANoLatticeProductReturnType<MIA,otherMIA,L_outer+R_outer+Inter>::type
{
    typedef typename MIANoLatticeProductReturnType<MIA,otherMIA,L_outer+R_outer+Inter>::type RetType;
    typedef typename internal::index_type<RetType>::type index_type;
    typedef typename internal::index_type<MIA>::type a_index_type;
    typedef typename internal::index_type<otherMIA>::type b_index_type;
    static_assert(internal::check_index_compatibility<index_type,array_type>::type::value,"Must use an array convertable to index_type");
    std::array<a_index_type, Inter> l_inter_dims;
    std::array<a_index_type, L_outer> l_outer_dims;
    std::array<b_index_type, Inter> r_inter_dims;
    std::array<b_index_type, R_outer> r_outer_dims;
    //get inter and outer dimensionality and the individual dimensions that make up that number - should default to one if any of the arrays are empty
    size_t l_inter_size=internal::reorder_from(a.dims(), l_inter_idx,l_inter_dims);
    size_t r_inter_size= internal::reorder_from(b.dims(), r_inter_idx,r_inter_dims);
    if(l_inter_size!=r_inter_size || !std::equal(l_inter_dims.begin(),l_inter_dims.end(),r_inter_dims.begin()))
        throw DimensionMismatchException("Element-wise dimensions must match during MIA multiplication");
    internal::reorder_from(a.dims(), l_outer_idx,l_outer_dims);
    internal::reorder_from(b.dims(), r_outer_idx,r_outer_dims);
    // result index order: a's outer indices, then b's outer, then the shared inter indices
    std::array<index_type,L_outer+R_outer+Inter> retDims;
    concat_arrays(l_outer_dims, r_outer_dims,l_inter_dims,retDims);
    RetType c(retDims);
    for(index_type idx=0;idx<c.dimensionality();++idx){
        // decompose the result's linear index, then rebuild each operand's
        // linear index from that operand's slice of the full-index vector
        auto full_indices=c.ind2sub(idx);
        a_index_type l_idx(0);
        b_index_type r_idx(0);
        l_idx+=internal::sub2ind(full_indices.begin(),full_indices.begin()+L_outer,l_outer_idx,a.dims());
        l_idx+=internal::sub2ind(full_indices.begin()+L_outer+R_outer,full_indices.end(),l_inter_idx,a.dims());
        r_idx+=internal::sub2ind(full_indices.begin()+L_outer,full_indices.begin()+L_outer+R_outer,r_outer_idx,b.dims());
        r_idx+=internal::sub2ind(full_indices.begin()+L_outer+R_outer,full_indices.end(),r_inter_idx,b.dims());
        c.atIdx(idx)=a.atIdx(l_idx)*b.atIdx(r_idx);
    }
    return c;
}

//assumes C, A, and B are of the same dimensions and in the same sort order (and A and B are sorted), should work for either SparseLattices or SparseMIAs
// Classic two-pointer merge of the sorted (index, data) streams of A and B into
// C, applying op to combine entries with equal indices. Entries present only in
// B are combined with a zero of A's data type; entries present only in A are
// copied through unchanged.
template<class C_Class, class B_Class, class A_Class,class Op>
void outside_merge_sparse_storage_containers(C_Class & C, const A_Class & A, const B_Class & B, Op op)
{
    using namespace boost::numeric;
    typedef typename internal::data_type<A_Class>::type a_data_type;
    C.clear();
    C.reserve(A.size()+B.size()); // worst case: no shared indices
    auto a_begin=A.index_begin();
    auto b_begin=B.index_begin();
    auto a_end=A.index_end();
    auto b_end=B.index_end();
    while(a_begin<a_end && b_begin<b_end){
        if (*a_begin<*b_begin){
            C.push_back(C.convert(A.data_at(a_begin)),*a_begin);
            a_begin++;
        }
        else if (*b_begin<*a_begin){
            C.push_back(C.convert(op(a_data_type(0),B.data_at(b_begin))),*b_begin);
            b_begin++;
        }
        else{
            C.push_back(C.convert(op(A.data_at(a_begin),B.data_at(b_begin))),*a_begin);
            a_begin++;
            b_begin++;
        }
    }
    // drain whichever operand still has entries
    if (a_begin==a_end){
        while (b_begin<b_end){
            C.push_back(C.convert(op(a_data_type(0),B.data_at(b_begin))),*b_begin);
            b_begin++;
        }
    }
    else{
        while (a_begin<a_end){
            C.push_back(C.convert(A.data_at(a_begin)),*a_begin);
            a_begin++;
        }
    }
}

////must be boost::tuples of iterators. Assumes a's container is sized to be a.size+b.size
//template<class AStorageItType, class BStorageItType, class Op>
//AStorageItType merge_sparse_storage_containers(AStorageItType a_begin,AStorageItType a_end,BStorageItType b_begin,BStorageItType b_end,Op op)
//{
//    using namespace boost::numeric;
//    typedef typename boost::remove_reference<typename BStorageItType::value_type::first_type>::type b_data_type;
//    typedef typename boost::remove_reference<typename AStorageItType::value_type::first_type>::type a_data_type;
//
//    typedef converter<a_data_type,b_data_type,conversion_traits<a_data_type,b_data_type>,def_overflow_handler,RoundEven<b_data_type>> to_mdata_type;
//    AStorageItType a_actual_end=a_end;
//    AStorageItType a_actual_begin=a_begin;
//    while(a_begin<a_end && b_begin<b_end){
//        if (std::get<1>(*a_begin)<std::get<1>(*b_begin)){
//            a_begin++;
//        }
//        else if (std::get<1>(*b_begin)<std::get<1>(*a_begin)){
//            std::get<0>(*a_actual_end)=op(a_data_type(0),to_mdata_type::convert(std::get<0>(*b_begin)));
//            std::get<1>(*a_actual_end++)=std::get<1>(*b_begin++);
//
//
//        }
//        else{
//            std::get<0>(*a_begin)=op(std::get<0>(*a_begin),to_mdata_type::convert(std::get<0>(*b_begin)));
//            a_begin++;
//            b_begin++;
//        }
//
//    }
//    if (a_begin==a_end){
//        while (b_begin<b_end){
//            std::get<0>(*a_actual_end)=op(a_data_type(0),to_mdata_type::convert(std::get<0>(*b_begin)));
//            std::get<1>(*a_actual_end++)=std::get<1>(*b_begin++);
//        }
//    }
//
//    std::inplace_merge(a_actual_begin,a_end,a_actual_end,[](const typename AStorageItType::value_type& lhs, const typename AStorageItType::value_type& rhs)
//    {
//        return std::get<1>(lhs)<std::get<1>(rhs);
//    });
//
//
//    return a_actual_end;
//
//}
//
////must be boost::tuples of iterators. Assumes a's container is sized to be a.size+b.size
//template<class ADataIt, class AIndexIt, class BDataIt, class BIndexIt,class Op>
//ADataIt merge_sparse_storage_containers(ADataIt a_data_begin,ADataIt a_data_end,AIndexIt a_index_begin,AIndexIt a_index_end,BDataIt b_data_begin,BDataIt b_data_end,BIndexIt b_index_begin,BIndexIt b_index_end,Op op)
//{
//    using namespace boost::numeric;
//    typedef typename ADataIt::value_type a_data_type;
//    typedef typename BDataIt::value_type b_data_type;
//
//
//    typedef converter<a_data_type,b_data_type,conversion_traits<a_data_type,b_data_type>,def_overflow_handler,RoundEven<b_data_type>> to_mdata_type;
//    ADataIt a_actual_data_end=a_data_end;
//    AIndexIt a_actual_index_end=a_index_end;
//    ADataIt a_cur_data_it=a_data_begin;
//    AIndexIt a_cur_index_it=a_index_begin;
//    while(a_cur_data_it<a_data_end && b_data_begin<b_data_end){
//        if (*a_cur_index_it<*b_index_begin){
//            a_cur_index_it++;
//            a_cur_data_it++;
//        }
//        else if (*b_index_begin<*a_cur_index_it){
//            *a_actual_data_end++=*b_data_begin++;
//            *a_actual_index_end++=*b_index_begin++;
//
//
//        }
//        else{
//            *a_cur_data_it=op(*a_cur_data_it,to_mdata_type::convert(*b_data_begin++));
//            a_cur_data_it++;
//            a_cur_index_it++;
//            b_index_begin++;
//        }
//
//    }
//    if (a_cur_data_it==a_data_end){
//        while (b_data_begin<b_data_end){
//            *a_actual_data_end++=*b_data_begin++;
//            *a_actual_index_end++=*b_index_begin++;
//
//        }
//    }
////    std::cout << "Index\t Data in scan merge" << std::endl;
////    auto j=a_index_begin;
////    for(auto i=a_data_begin;i<a_actual_data_end;++i,++j)
////        std::cout << *j << "\t " << *i << std::endl;
////
////    std::cout << std::endl;
////
////    std::cout << " diff " << a_index_end-a_index_begin << std::endl;
//    std::inplace_merge(make_sort_permute_iter(a_index_begin,a_data_begin),
//                       make_sort_permute_iter(a_index_end,a_data_end),
//                       make_sort_permute_iter(a_actual_index_end,a_actual_data_end),
//                       sort_permute_iter_compare<AIndexIt,ADataIt>());
//
////    std::inplace_merge(a_data_begin,a_data_end,a_actual_data_end,[&](const typename ADataIt::value_type& lhs, const typename ADataIt::value_type& rhs)
////    {
////        return *(a_index_begin+(&lhs-&(*a_data_begin))) <*(a_index_begin+(&rhs-&(*a_data_begin)));
////    });
////    std::inplace_merge(a_index_begin,a_index_end,a_actual_index_end);
//    /*std::cout << " diff " << a_index_end-a_index_begin << std::endl;
//    std::cout << "Index\t Data in AFTER scan merge" << std::endl;
//    for(auto i=a_data_begin,j=a_index_begin;i<a_actual_data_end;++i,++j)
//        std::cout << *j << "\t " << *i << std::endl;
//
//    std::cout << std::endl;*/
//    return a_actual_data_end;
//
//}

// Wraps an integral value in a Range object (degenerate single-value range).
template<typename index_type,typename T, typename boost::enable_if< boost::is_integral<T>, int >::type=0 >
Range<index_type> create_range(T t){
    return Range<index_type>((index_type)t); //create range object based on t;
}

//if the current variadic template parameter is a Range object, simply just return it
template<typename index_type>
Range<index_type> & create_range(Range<index_type>&t){
    return t;
}

//base case for variadic parameters - do not alter the iterator
template<typename ItType>
void get_range_array(ItType it){
    return;
}

//if the current variable in the varadic template is an integral type, create a Range object for that type and add it using the current array iterator
template<typename ItType,typename T,typename...Ranges>
void get_range_array(ItType it,T t,Ranges...ranges){
    typedef typename ItType::value_type RangeType;
    typedef typename RangeType::index_type index_type;
    *it=create_range<index_type>(t);
    get_range_array(it+1,ranges...); //recurse to next spot in the container and the next variadic template parameter
}

//! Converts a scalar value to data_type
/*!
    \tparam from_data_type the data_type you are converting from
*/
template<class data_type,class from_data_type,typename boost::enable_if< boost::is_pod< from_data_type >, int >::type = 0>
inline data_type convert(const from_data_type from){
    using namespace boost::numeric;
    // NOTE(review): the converter's source type is boost::uniform_real<>::result_type
    // rather than from_data_type -- looks unintentional; confirm before relying on
    // range checking for non-double sources.
    typedef boost::numeric::converter<data_type,boost::uniform_real<>::result_type> to_mdata_type;
    return to_mdata_type::convert(from);
}

// Debug functor: prints the typeid name of whatever it is applied to.
struct print_class_name {
    template <typename T>
    void operator()( T t ) const {
        std::cout << typeid(t).name() << " ";
    }
};

// Base-2 logarithm via the identity log2(x) = ln(x) * log2(e).
inline long double log2(const long double x){
    return std::log(x) * LIBMIA_LOG2E;
}

// Integer exponentiation by repeated multiplication; _exp is assumed >= 0
// (a negative exponent yields 1, since the loop body never runs).
template<typename T1>
inline T1 manual_int_power(const T1 base,const int _exp){
    T1 result=1;
    for(int i=0;i<_exp;++i)
    {
        result*=base;
    }
    return result;
}

/*! @} */

}

// Functor that prints a value preceded by a space (for use with algorithms).
template<class T>
struct MIAprint
{
    void operator() (T i) {
        std::cout << " " << i;
    }
};

// Binary functor that always selects its first argument.
template<class T>
struct select_first
{
    T& operator()(T&left, T& right){
        return left;
    }
};

// Prints a header followed by each array element, then a newline.
template<class array_type>
void print_array(const array_type & _array, const std::string &header){
    std::cout << header;
    for(auto & _i:_array){
        std::cout << " " << _i;
    }
    std::cout << std::endl;
}

// Prints each array element separated by spaces, with no trailing newline.
template<class array_type>
void print_array_on_line(const array_type & _array){
    for(auto & _i:_array){
        std::cout << " " << _i;
    }
}

// Element-wise equality of two fixed-size arrays, converting array2's elements
// to T1 (with Boost range checking) before comparing.
template<class T1, class T2,size_t _size>
bool compare_arrays(const std::array<T1,_size> & array1, const std::array<T2,_size> & array2){
    typedef boost::numeric::converter<T1,T2> to_mdata_type;
    for(size_t i=0;i<_size;++i)
        if (array1[i]!=to_mdata_type::convert(array2[i]))
            return false;
    return true;
}

// Converts a std::array of one element type to a std::array of data_type; the
// second overload is a pass-through for arrays already of data_type.
template<class data_type>
struct array_converter
{
    template<class other_data_type,size_t _size>
    static std::array<data_type,_size> convert(const std::array<other_data_type,_size> & _from)
    {
        typedef boost::numeric::converter<data_type,other_data_type> to_mdata_type;
        std::array<data_type,_size> ret;
        for(size_t i=0;i<_size;++i)
            ret[i]=to_mdata_type::convert(_from[i]);
        return ret;
    }
    template<size_t _size>
    static std::array<data_type,_size> convert(std::array<data_type,_size> & _from){
        return _from;
    }
};

//!prec must be positive
// Fuzzy float comparison: absolute tolerance near zero (either operand with
// magnitude < 1), relative tolerance otherwise.
template<typename T, typename T2,typename T3>
inline bool isEqualFuzzy(T a, T2 b, T3 prec = Tolerance<T>::tolerance)
{
    if(std::abs(a) < 1 || std::abs(b) < 1)
        return std::abs(a-b)<=prec;
    else{
        return std::abs(a - b)<= std::min(std::abs(a), std::abs(b))*prec;
    }
}

//! Removes data with duplicated indices - conflicts are solved by using the collector class, ie std::plus<data_type>
// In-place compaction of a sorted (index, data) pair of ranges: runs of equal
// indices are folded into a single entry via collector. Returns the number of
// unique entries remaining; returns 0 for an empty range.
template<class index_it, class data_it,class Collector>
size_t collect_duplicates_function(index_it index_begin, index_it index_end, data_it data_begin,Collector collector)
{
    if (index_begin == index_end)
        return 0;
    auto result_idx = index_begin;
    auto result_data= data_begin;
    auto first=result_idx;
    auto first_data=data_begin;
    while (++first < index_end) {
        ++first_data;
        if (*result_idx != *first){
            // new index value: advance the write position and copy the entry
            *(++result_idx)=*first;
            *(++result_data)=*first_data;
        }
        else{
            // duplicate index: fold the data into the current entry
            *result_data=collector(*result_data,*first_data);
        }
    }
    return result_idx-index_begin+1;
}

}

#endif // FUNCTION_UTIL_H
dynwave.c
//----------------------------------------------------------------------------- // dynwave.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/20/14 (5.1.001) // 03/28/14 (5.1.002) // 09/15/14 (5.1.007) // 03/19/15 (5.1.008) // 08/01/16 (5.1.011) // 05/10/18 (5.1.013) // Author: L. Rossman (EPA) // M. Tryby (EPA) // R. Dickinson (CDM) // // Dynamic wave flow routing functions. // // This module solves the dynamic wave flow routing equations using // Picard Iterations (i.e., a method of successive approximations) // to solve the explicit form of the continuity and momentum equations // for conduits. // // Build 5.1.002: // - Only non-ponded nodal surface area is saved for use in // surcharge algorithm. // // Build 5.1.007: // - Node losses added to node outflow variable instead of treated // as a separate item when computing change in node flow volume. // // Build 5.1.008: // - Module-specific constants moved here from project.c. // - Support added for user-specified minimum variable time step. // - Node crown elevations found here instead of in flowrout.c module. // - OpenMP use to parallelize findLinkFlows() & findNodeDepths(). // - Bug in finding complete list of capacity limited links fixed. // // Build 5.1.011: // - Added test for failed memory allocation. // - Fixed illegal array index bug for Ideal Pumps. // // Build 5.1.013: // - Include omp.h protected against lack of compiler support for OpenMP. // - SurchargeMethod option used to decide how node surcharging is handled. // - Storage nodes allowed to pressurize if their surcharge depth > 0. // - Minimum flow needed to compute a Courant time step modified. 
//
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE

#include "headers.h"
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)                                                           //(5.1.013)
  #include <omp.h>
#endif

//-----------------------------------------------------------------------------
//     Constants
//-----------------------------------------------------------------------------
static const double MINTIMESTEP = 0.001;    // min. time step (sec)
static const double OMEGA       = 0.5;      // under-relaxation parameter
static const double DEFAULT_SURFAREA = 12.566;  // Min. nodal surface area (~4 ft diam.)
static const double DEFAULT_HEADTOL = 0.005;    // Default head tolerance (ft)
static const double EXTRAN_CROWN_CUTOFF = 0.96;     // crown cutoff for EXTRAN  //(5.1.013)
static const double SLOT_CROWN_CUTOFF = 0.985257;   // crown cutoff for SLOT    //(5.1.013)
static const int    DEFAULT_MAXTRIALS = 8;  // Max. trials per time step

//-----------------------------------------------------------------------------
//  Data Structures
//-----------------------------------------------------------------------------
// Per-node working state used during the Picard iterations of one time step.
typedef struct
{
    char    converged;                 // TRUE if iterations for a node done
    double  newSurfArea;               // current surface area (ft2)
    double  oldSurfArea;               // previous surface area (ft2)
    double  sumdqdh;                   // sum of dqdh from adjoining links
    double  dYdT;                      // change in depth w.r.t. time (ft/sec)
} TXnode;

//-----------------------------------------------------------------------------
//  Shared Variables
//-----------------------------------------------------------------------------
static double  VariableStep;           // size of variable time step (sec)
static TXnode* Xnode;                  // extended nodal information
static double  Omega;                  // actual under-relaxation parameter
static int     Steps;                  // number of Picard iterations

//-----------------------------------------------------------------------------
//  Function declarations
//-----------------------------------------------------------------------------
static void   initRoutingStep(void);
static void   initNodeStates(void);
static void   findBypassedLinks();
static void   findLimitedLinks();

static void   findLinkFlows(double dt);
static int    isTrueConduit(int link);
static void   findNonConduitFlow(int link, double dt);
static void   findNonConduitSurfArea(int link);
static double getModPumpFlow(int link, double q, double dt);
static void   updateNodeFlows(int link);

static int    findNodeDepths(double dt);
static void   setNodeDepth(int node, double dt);
static double getFloodedDepth(int node, int canPond, double dV, double yNew,
              double yMax, double dt);

static double getVariableStep(double maxStep);
static double getLinkStep(double tMin, int *minLink);
static double getNodeStep(double tMin, int *minNode);

//=============================================================================

void dynwave_init()
//
//  Input:   none
//  Output:  none
//  Purpose: initializes dynamic wave routing method.
//
{
    int i, j;
    double z;

    VariableStep = 0.0;
    Xnode = (TXnode *) calloc(Nobjects[NODE], sizeof(TXnode));

    // --- bail out (with an error report) if the work array can't be allocated
    if ( Xnode == NULL )
    {
        report_writeErrorMsg(ERR_MEMORY,
            " Not enough memory for dynamic wave routing.");
        return;
    }

    // --- initialize node surface areas & crown elev.
    for (i = 0; i < Nobjects[NODE]; i++ )
    {
        Xnode[i].newSurfArea = 0.0;
        Xnode[i].oldSurfArea = 0.0;
        Node[i].crownElev = Node[i].invertElev;
    }

    // --- initialize links & update node crown elevations
    //     (a node's crown is the highest connecting conduit soffit)
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        j = Link[i].node1;
        z = Node[j].invertElev + Link[i].offset1 + Link[i].xsect.yFull;
        Node[j].crownElev = MAX(Node[j].crownElev, z);
        j = Link[i].node2;
        z = Node[j].invertElev + Link[i].offset2 + Link[i].xsect.yFull;
        Node[j].crownElev = MAX(Node[j].crownElev, z);
        Link[i].flowClass = DRY;
        Link[i].dqdh = 0.0;
    }

    // --- set crown cutoff for finding top width of closed conduits          //(5.1.013)
    if ( SurchargeMethod == SLOT ) CrownCutoff = SLOT_CROWN_CUTOFF;           //(5.1.013)
    else CrownCutoff = EXTRAN_CROWN_CUTOFF;                                   //(5.1.013)
}

//=============================================================================

void  dynwave_close()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for dynamic wave routing method.
//
{
    FREE(Xnode);
}

//=============================================================================

void dynwave_validate()
//
//  Input:   none
//  Output:  none
//  Purpose: adjusts dynamic wave routing options (clamps the minimum routing
//           step and converts user units to internal feet where needed).
//
{
    if ( MinRouteStep > RouteStep ) MinRouteStep = RouteStep;
    if ( MinRouteStep < MINTIMESTEP ) MinRouteStep = MINTIMESTEP;
    if ( MinSurfArea == 0.0 ) MinSurfArea = DEFAULT_SURFAREA;
    else MinSurfArea /= UCF(LENGTH) * UCF(LENGTH);
    if ( HeadTol == 0.0 ) HeadTol = DEFAULT_HEADTOL;
    else HeadTol /= UCF(LENGTH);
    if ( MaxTrials == 0 ) MaxTrials = DEFAULT_MAXTRIALS;
}

//=============================================================================

double dynwave_getRoutingStep(double fixedStep)
//
//  Input:   fixedStep = user-supplied fixed time step (sec)
//  Output:  returns routing time step (sec)
//  Purpose: computes variable routing time step if applicable.
//
{
    // --- use user-supplied fixed step if variable step option turned off
    //     or if its smaller than the min. allowable variable time step
    if ( CourantFactor == 0.0 ) return fixedStep;
    if ( fixedStep < MINTIMESTEP ) return fixedStep;

    // --- at start of simulation (when current variable step is zero)
    //     use the minimum allowable time step
    if ( VariableStep == 0.0 )
    {
        VariableStep = MinRouteStep;
    }

    // --- otherwise compute variable step based on current flow solution
    else VariableStep = getVariableStep(fixedStep);

    // --- adjust step to be a multiple of a millisecond
    VariableStep = floor(1000.0 * VariableStep) / 1000.0;
    return VariableStep;
}

//=============================================================================

int dynwave_execute(double tStep)
//
//  Input:   tStep = time step (sec)
//  Output:  returns number of iterations used
//  Purpose: routes flows through drainage network over current time step
//           using Picard iteration (successive approximations).
//
{
    int converged;

    // --- initialize
    if ( ErrorCode ) return 0;
    Steps = 0;
    converged = FALSE;
    Omega = OMEGA;
    initRoutingStep();

    // --- keep iterating until convergence
    while ( Steps < MaxTrials )
    {
        // --- execute a routing step & check for nodal convergence
        initNodeStates();
        findLinkFlows(tStep);
        converged = findNodeDepths(tStep);
        Steps++;
        if ( Steps > 1 )
        {
            if ( converged ) break;

            // --- check if link calculations can be skipped in next step
            findBypassedLinks();
        }
    }
    if ( !converged ) NonConvergeCount++;

    //  --- identify any capacity-limited conduits
    findLimitedLinks();
    return Steps;
}

//=============================================================================

void initRoutingStep()
//
//  Input:   none
//  Output:  none
//  Purpose: resets per-step node/link convergence state before iterating.
//
{
    int i;
    for (i = 0; i < Nobjects[NODE]; i++)
    {
        Xnode[i].converged = FALSE;
        Xnode[i].dYdT = 0.0;
    }
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        Link[i].bypassed = FALSE;
        Link[i].surfArea1 = 0.0;
        Link[i].surfArea2 = 0.0;
    }

    // --- a2 preserves conduit area from solution at last time step
    for ( i = 0; i < Nlinks[CONDUIT]; i++) Conduit[i].a2 = Conduit[i].a1;
}

//=============================================================================

void
initNodeStates()
//
//  Input:   none
//  Output:  none
//  Purpose: initializes node's surface area, inflow & outflow
//
{
    int i;

    for (i = 0; i < Nobjects[NODE]; i++)
    {
        // --- initialize nodal surface area (ponded area when ponding allowed)
        if ( AllowPonding )
        {
            Xnode[i].newSurfArea = node_getPondedArea(i, Node[i].newDepth);
        }
        else
        {
            Xnode[i].newSurfArea = node_getSurfArea(i, Node[i].newDepth);
        }

/*  ////  Removed for release 5.1.013.  ////                                   //(5.1.013)
        if ( Xnode[i].newSurfArea < MinSurfArea )
        {
            Xnode[i].newSurfArea = MinSurfArea;
        }
*/

        // --- initialize nodal inflow & outflow
        //     (negative lateral flow counts as an outflow)
        Node[i].inflow = 0.0;
        Node[i].outflow = Node[i].losses;
        if ( Node[i].newLatFlow >= 0.0 )
        {
            Node[i].inflow += Node[i].newLatFlow;
        }
        else
        {
            Node[i].outflow -= Node[i].newLatFlow;
        }
        Xnode[i].sumdqdh = 0.0;
    }
}

//=============================================================================

void findBypassedLinks()
//
//  Input:   none
//  Output:  none
//  Purpose: marks a link as bypassed (skippable next iteration) when both of
//           its end nodes have already converged.
//
{
    int i;
    for (i = 0; i < Nobjects[LINK]; i++)
    {
        if ( Xnode[Link[i].node1].converged &&
             Xnode[Link[i].node2].converged ) Link[i].bypassed = TRUE;
        else Link[i].bypassed = FALSE;
    }
}

//=============================================================================

void findLimitedLinks()
//
//  Input:   none
//  Output:  none
//  Purpose: determines if a conduit link is capacity limited.
//
{
    int    j, n1, n2, k;
    double h1, h2;

    for (j = 0; j < Nobjects[LINK]; j++)
    {
        // ---- check only non-dummy conduit links
        if ( !isTrueConduit(j) ) continue;

        // --- check that upstream end is full
        k = Link[j].subIndex;
        Conduit[k].capacityLimited = FALSE;
        if ( Conduit[k].a1 >= Link[j].xsect.aFull )
        {
            // --- check if HGL slope > conduit slope
            n1 = Link[j].node1;
            n2 = Link[j].node2;
            h1 = Node[n1].newDepth + Node[n1].invertElev;
            h2 = Node[n2].newDepth + Node[n2].invertElev;
            if ( (h1 - h2) > fabs(Conduit[k].slope) * Conduit[k].length )
                Conduit[k].capacityLimited = TRUE;
        }
    }
}

//=============================================================================

void findLinkFlows(double dt)
//
//  Input:   dt = time step (sec)
//  Output:  none
//  Purpose: computes new flows in all links; conduit flows are solved in
//           parallel (OpenMP), then node inflow/outflow totals are updated
//           serially so the shared sums are not raced.
//
{
    int i;

    // --- find new flow in each non-dummy conduit
#pragma omp parallel num_threads(NumThreads)
{
    #pragma omp for
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( isTrueConduit(i) && !Link[i].bypassed )
            dwflow_findConduitFlow(i, Steps, Omega, dt);
    }
}

    // --- update inflow/outflows for nodes attached to non-dummy conduits
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( isTrueConduit(i) ) updateNodeFlows(i);
    }

    // --- find new flows for all dummy conduits, pumps & regulators
    for ( i = 0; i < Nobjects[LINK]; i++)
    {
        if ( !isTrueConduit(i) )
        {
            if ( !Link[i].bypassed ) findNonConduitFlow(i, dt);
            updateNodeFlows(i);
        }
    }
}

//=============================================================================

int isTrueConduit(int j)
//
//  Input:   j = link index
//  Output:  returns TRUE if link j is a conduit with a real (non-dummy) x-section
//
{
    return ( Link[j].type == CONDUIT && Link[j].xsect.type != DUMMY );
}

//=============================================================================

void findNonConduitFlow(int i, double dt)
//
//  Input:   i = link index
//           dt = time step (sec)
//  Output:  none
//  Purpose: finds new flow in a non-conduit-type link
//
{
    double qLast;                      // previous link flow (cfs)
    double qNew;                       // new link flow (cfs)

    // --- get link flow from last iteration
    qLast = Link[i].newFlow;
    Link[i].dqdh = 0.0;

    // --- get new inflow to link from its upstream node
    //     (link_getInflow returns 0 if flap gate closed or
pump is offline) qNew = link_getInflow(i); if ( Link[i].type == PUMP ) qNew = getModPumpFlow(i, qNew, dt); // --- find surface area at each end of link findNonConduitSurfArea(i); // --- apply under-relaxation with flow from previous iteration; // --- do not allow flow to change direction without first being 0 if ( Steps > 0 && Link[i].type != PUMP ) { qNew = (1.0 - Omega) * qLast + Omega * qNew; if ( qNew * qLast < 0.0 ) qNew = 0.001 * SGN(qNew); } Link[i].newFlow = qNew; } //============================================================================= double getModPumpFlow(int i, double q, double dt) // // Input: i = link index // q = pump flow from pump curve (cfs) // dt = time step (sec) // Output: returns modified pump flow rate (cfs) // Purpose: modifies pump curve pumping rate depending on amount of water // available at pump's inlet node. // { int j = Link[i].node1; // pump's inlet node index int k = Link[i].subIndex; // pump's index double newNetInflow; // inflow - outflow rate (cfs) double netFlowVolume; // inflow - outflow volume (ft3) double y; // node depth (ft) if ( q == 0.0 ) return q; // --- case where inlet node is a storage node: // prevent node volume from going negative if ( Node[j].type == STORAGE ) return node_getMaxOutflow(j, q, dt); // --- case where inlet is a non-storage node switch ( Pump[k].type ) { // --- for Type1 pump, a volume is computed for inlet node, // so make sure it doesn't go negative case TYPE1_PUMP: return node_getMaxOutflow(j, q, dt); // --- for other types of pumps, if pumping rate would make depth // at upstream node negative, then set pumping rate = inflow case TYPE2_PUMP: case TYPE4_PUMP: case TYPE3_PUMP: newNetInflow = Node[j].inflow - Node[j].outflow - q; netFlowVolume = 0.5 * (Node[j].oldNetInflow + newNetInflow ) * dt; y = Node[j].oldDepth + netFlowVolume / Xnode[j].newSurfArea; if ( y <= 0.0 ) return Node[j].inflow; } return q; } //============================================================================= void 
findNonConduitSurfArea(int i) // // Input: i = link index // Output: none // Purpose: finds the surface area contributed by a non-conduit // link to its upstream and downstream nodes. // { if ( Link[i].type == ORIFICE ) { Link[i].surfArea1 = Orifice[Link[i].subIndex].surfArea / 2.; } // --- no surface area for weirs to maintain SWMM 4 compatibility else Link[i].surfArea1 = 0.0; Link[i].surfArea2 = Link[i].surfArea1; if ( Link[i].flowClass == UP_CRITICAL || Node[Link[i].node1].type == STORAGE ) Link[i].surfArea1 = 0.0; if ( Link[i].flowClass == DN_CRITICAL || Node[Link[i].node2].type == STORAGE ) Link[i].surfArea2 = 0.0; } //============================================================================= void updateNodeFlows(int i) // // Input: i = link index // q = link flow rate (cfs) // Output: none // Purpose: updates cumulative inflow & outflow at link's end nodes. // { int k; int barrels = 1; int n1 = Link[i].node1; int n2 = Link[i].node2; double q = Link[i].newFlow; double uniformLossRate = 0.0; // --- compute any uniform seepage loss from a conduit if ( Link[i].type == CONDUIT ) { k = Link[i].subIndex; uniformLossRate = Conduit[k].evapLossRate + Conduit[k].seepLossRate; barrels = Conduit[k].barrels; } // --- update total inflow & outflow at upstream/downstream nodes if ( q >= 0.0 ) { Node[n1].outflow += q + uniformLossRate; Node[n2].inflow += q; } else { Node[n1].inflow -= q; Node[n2].outflow -= q - uniformLossRate; } // --- add surf. 
area contributions to upstream/downstream nodes Xnode[Link[i].node1].newSurfArea += Link[i].surfArea1 * barrels; Xnode[Link[i].node2].newSurfArea += Link[i].surfArea2 * barrels; // --- update summed value of dqdh at each end node Xnode[Link[i].node1].sumdqdh += Link[i].dqdh; if ( Link[i].type == PUMP ) { k = Link[i].subIndex; if ( Pump[k].type != TYPE4_PUMP ) { Xnode[n2].sumdqdh += Link[i].dqdh; } } else Xnode[n2].sumdqdh += Link[i].dqdh; } //============================================================================= int findNodeDepths(double dt) { int i; int converged; // convergence flag double yOld; // previous node depth (ft) // --- compute outfall depths based on flow in connecting link for ( i = 0; i < Nobjects[LINK]; i++ ) link_setOutfallDepth(i); // --- compute new depth for all non-outfall nodes and determine if // depth change from previous iteration is below tolerance converged = TRUE; #pragma omp parallel num_threads(NumThreads) { #pragma omp for private(yOld) for ( i = 0; i < Nobjects[NODE]; i++ ) { if ( Node[i].type == OUTFALL ) continue; yOld = Node[i].newDepth; setNodeDepth(i, dt); Xnode[i].converged = TRUE; if ( fabs(yOld - Node[i].newDepth) > HeadTol ) { converged = FALSE; Xnode[i].converged = FALSE; } } } return converged; } //============================================================================= void setNodeDepth(int i, double dt) // // Input: i = node index // dt = time step (sec) // Output: none // Purpose: sets depth at non-outfall node after current time step. // { int canPond; // TRUE if node can pond overflows int isPonded; // TRUE if node is currently ponded int isSurcharged = FALSE; // TRUE if node is surcharged //(5.1.013) double dQ; // inflow minus outflow at node (cfs) double dV; // change in node volume (ft3) double dy; // change in node depth (ft) double yMax; // max. 
depth at node (ft) double yOld; // node depth at previous time step (ft) double yLast; // previous node depth (ft) double yNew; // new node depth (ft) double yCrown; // depth to node crown (ft) double surfArea; // node surface area (ft2) double denom; // denominator term double corr; // correction factor double f; // relative surcharge depth // --- see if node can pond water above it canPond = (AllowPonding && Node[i].pondedArea > 0.0); isPonded = (canPond && Node[i].newDepth > Node[i].fullDepth); // --- initialize values yCrown = Node[i].crownElev - Node[i].invertElev; yOld = Node[i].oldDepth; yLast = Node[i].newDepth; Node[i].overflow = 0.0; surfArea = Xnode[i].newSurfArea; surfArea = MAX(surfArea, MinSurfArea); //(5.1.013) // --- determine average net flow volume into node over the time step dQ = Node[i].inflow - Node[i].outflow; dV = 0.5 * (Node[i].oldNetInflow + dQ) * dt; //// Following code segment added to release 5.1.013. //// //(5.1.013) // --- determine if node is EXTRAN surcharged if (SurchargeMethod == EXTRAN) { // --- ponded nodes don't surcharge if (isPonded) isSurcharged = FALSE; // --- closed storage units that are full are in surcharge else if (Node[i].type == STORAGE) { isSurcharged = (Node[i].surDepth > 0.0 && yLast > Node[i].fullDepth); } // --- surcharge occurs when node depth exceeds top of its highest link else isSurcharged = (yCrown > 0.0 && yLast > yCrown); } ///////////////////////////////////////////////////////////// // --- if node not surcharged, base depth change on surface area if (!isSurcharged) //(5.1.013) { dy = dV / surfArea; yNew = yOld + dy; // --- save non-ponded surface area for use in surcharge algorithm if ( !isPonded ) Xnode[i].oldSurfArea = surfArea; // --- apply under-relaxation to new depth estimate if ( Steps > 0 ) { yNew = (1.0 - Omega) * yLast + Omega * yNew; } // --- don't allow a ponded node to drop much below full depth if ( isPonded && yNew < Node[i].fullDepth ) yNew = Node[i].fullDepth - FUDGE; } // --- if node 
surcharged, base depth change on dqdh // NOTE: depth change is w.r.t depth from previous // iteration; also, do not apply under-relaxation. else { // --- apply correction factor for upstream terminal nodes corr = 1.0; if ( Node[i].degree < 0 ) corr = 0.6; // --- allow surface area from last non-surcharged condition // to influence dqdh if depth close to crown depth denom = Xnode[i].sumdqdh; if ( yLast < 1.25 * yCrown ) { f = (yLast - yCrown) / yCrown; denom += (Xnode[i].oldSurfArea/dt - Xnode[i].sumdqdh) * exp(-15.0 * f); } // --- compute new estimate of node depth if ( denom == 0.0 ) dy = 0.0; else dy = corr * dQ / denom; yNew = yLast + dy; if ( yNew < yCrown ) yNew = yCrown - FUDGE; // --- don't allow a newly ponded node to rise much above full depth if ( canPond && yNew > Node[i].fullDepth ) yNew = Node[i].fullDepth + FUDGE; } // --- depth cannot be negative if ( yNew < 0 ) yNew = 0.0; // --- determine max. non-flooded depth yMax = Node[i].fullDepth; if ( canPond == FALSE ) yMax += Node[i].surDepth; // --- find flooded depth & volume if ( yNew > yMax ) { yNew = getFloodedDepth(i, canPond, dV, yNew, yMax, dt); } else Node[i].newVolume = node_getVolume(i, yNew); // --- compute change in depth w.r.t. time Xnode[i].dYdT = fabs(yNew - yOld) / dt; // --- save new depth for node Node[i].newDepth = yNew; } //============================================================================= double getFloodedDepth(int i, int canPond, double dV, double yNew, double yMax, double dt) // // Input: i = node index // canPond = TRUE if water can pond over node // isPonded = TRUE if water is currently ponded // dV = change in volume over time step (ft3) // yNew = current depth at node (ft) // yMax = max. depth at node before ponding (ft) // dt = time step (sec) // Output: returns depth at node when flooded (ft) // Purpose: computes depth, volume and overflow for a flooded node. 
// { if ( canPond == FALSE ) { Node[i].overflow = dV / dt; Node[i].newVolume = Node[i].fullVolume; yNew = yMax; } else { Node[i].newVolume = MAX((Node[i].oldVolume+dV), Node[i].fullVolume); Node[i].overflow = (Node[i].newVolume - MAX(Node[i].oldVolume, Node[i].fullVolume)) / dt; } if ( Node[i].overflow < FUDGE ) Node[i].overflow = 0.0; return yNew; } //============================================================================= double getVariableStep(double maxStep) // // Input: maxStep = user-supplied max. time step (sec) // Output: returns time step (sec) // Purpose: finds time step that satisfies stability criterion but // is no greater than the user-supplied max. time step. // { int minLink = -1; // index of link w/ min. time step int minNode = -1; // index of node w/ min. time step double tMin; // allowable time step (sec) double tMinLink; // allowable time step for links (sec) double tMinNode; // allowable time step for nodes (sec) // --- find stable time step for links & then nodes tMin = maxStep; tMinLink = getLinkStep(tMin, &minLink); tMinNode = getNodeStep(tMinLink, &minNode); // --- use smaller of the link and node time step tMin = tMinLink; if ( tMinNode < tMin ) { tMin = tMinNode ; minLink = -1; } // --- update count of times the minimum node or link was critical stats_updateCriticalTimeCount(minNode, minLink); // --- don't let time step go below an absolute minimum if ( tMin < MinRouteStep ) tMin = MinRouteStep; return tMin; } //============================================================================= double getLinkStep(double tMin, int *minLink) // // Input: tMin = critical time step found so far (sec) // Output: minLink = index of link with critical time step; // returns critical time step (sec) // Purpose: finds critical time step for conduits based on Courant criterion. 
// { int i; // link index int k; // conduit index double q; // conduit flow (cfs) double t; // time step (sec) double tLink = tMin; // critical link time step (sec) // --- examine each conduit link for ( i = 0; i < Nobjects[LINK]; i++ ) { if ( Link[i].type == CONDUIT ) { // --- skip conduits with negligible flow, area or Fr k = Link[i].subIndex; q = fabs(Link[i].newFlow) / Conduit[k].barrels; if ( q <= FUDGE //(5.1.013) || Conduit[k].a1 <= FUDGE || Link[i].froude <= 0.01 ) continue; // --- compute time step to satisfy Courant condition t = Link[i].newVolume / Conduit[k].barrels / q; t = t * Conduit[k].modLength / link_getLength(i); t = t * Link[i].froude / (1.0 + Link[i].froude) * CourantFactor; // --- update critical link time step if ( t < tLink ) { tLink = t; *minLink = i; } } } return tLink; } //============================================================================= double getNodeStep(double tMin, int *minNode) // // Input: tMin = critical time step found so far (sec) // Output: minNode = index of node with critical time step; // returns critical time step (sec) // Purpose: finds critical time step for nodes based on max. allowable // projected change in depth. // { int i; // node index double maxDepth; // max. depth allowed at node (ft) double dYdT; // change in depth per unit time (ft/sec) double t1; // time needed to reach depth limit (sec) double tNode = tMin; // critical node time step (sec) // --- find smallest time so that estimated change in nodal depth // does not exceed safety factor * maxdepth for ( i = 0; i < Nobjects[NODE]; i++ ) { // --- see if node can be skipped if ( Node[i].type == OUTFALL ) continue; if ( Node[i].newDepth <= FUDGE) continue; if ( Node[i].newDepth + FUDGE >= Node[i].crownElev - Node[i].invertElev ) continue; // --- define max. 
allowable depth change using crown elevation maxDepth = (Node[i].crownElev - Node[i].invertElev) * 0.25; if ( maxDepth < FUDGE ) continue; dYdT = Xnode[i].dYdT; if (dYdT < FUDGE ) continue; // --- compute time to reach max. depth & compare with critical time t1 = maxDepth / dYdT; if ( t1 < tNode ) { tNode = t1; *minNode = i; } } return tNode; }
GB_helper.c
//------------------------------------------------------------------------------
// GB_helper.c: helper functions for @GrB interface
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// TODO::: move these into the @GrB interface instead

// These functions are only used by the @GrB interface for
// SuiteSparse:GraphBLAS.

#include "GB_helper.h"

//------------------------------------------------------------------------------
// GB_NTHREADS: determine the number of threads to use
//------------------------------------------------------------------------------

// Declares nthreads_max, chunk, and nthreads in the enclosing scope,
// sized to the amount of work to be done.
#define GB_NTHREADS(work)                                       \
    int nthreads_max = GB_Global_nthreads_max_get ( ) ;         \
    double chunk = GB_Global_chunk_get ( ) ;                    \
    int nthreads = GB_nthreads (work, chunk, nthreads_max) ;

//------------------------------------------------------------------------------
// GB_ALLOCATE_WORK: allocate per-thread workspace
//------------------------------------------------------------------------------

// Declares Work (one entry per thread) and Work_size; the enclosing function
// returns false on allocation failure.
#define GB_ALLOCATE_WORK(work_type)                                     \
    size_t Work_size ;                                                  \
    work_type *Work = GB_MALLOC_WORK (nthreads, work_type, &Work_size) ; \
    if (Work == NULL) return (false) ;

//------------------------------------------------------------------------------
// GB_FREE_WORKSPACE: free per-thread workspace
//------------------------------------------------------------------------------

#define GB_FREE_WORKSPACE                   \
    GB_FREE_WORK (&Work, Work_size) ;

//------------------------------------------------------------------------------
// GB_helper1: convert 0-based indices to 1-based for gbextracttuples
//------------------------------------------------------------------------------

void GB_helper1              // convert zero-based indices to one-based
(
    double *restrict I_double,          // output array
    const GrB_Index *restrict I,        // input array
    int64_t nvals                       // size of input and output arrays
)
{

    GB_NTHREADS (nvals) ;

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < nvals ; k++)
    {
        I_double [k] = (double) (I [k] + 1) ;
    }
}

//------------------------------------------------------------------------------
// GB_helper1i: convert 0-based indices to 1-based for gbextracttuples
//------------------------------------------------------------------------------

void GB_helper1i             // convert zero-based indices to one-based
(
    int64_t *restrict I,                // input/output array
    int64_t nvals                       // size of input/output array
)
{

    GB_NTHREADS (nvals) ;

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < nvals ; k++)
    {
        I [k] ++ ;
    }
}

//------------------------------------------------------------------------------
// GB_helper3: convert 1-based indices to 0-based for gb_mxarray_to_list
//------------------------------------------------------------------------------

// Returns false if any entry of List_double is not an exact integer.
bool GB_helper3              // return true if OK, false on error
(
    int64_t *restrict List,             // size len, output array
    const double *restrict List_double, // size len, input array
    int64_t len,
    int64_t *List_max                   // also compute the max entry in the list
)
{

    GB_NTHREADS (len) ;

    ASSERT (List != NULL) ;
    ASSERT (List_double != NULL) ;
    ASSERT (List_max != NULL) ;

    bool ok = true ;
    int64_t listmax = -1 ;

    GB_ALLOCATE_WORK (int64_t) ;

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        bool my_ok = true ;
        int64_t k1, k2, my_listmax = -1 ;
        GB_PARTITION (k1, k2, len, tid, nthreads) ;
        for (int64_t k = k1 ; k < k2 ; k++)
        {
            double x = List_double [k] ;
            int64_t i = (int64_t) x ;
            my_ok = my_ok && (x == (double) i) ;
            my_listmax = GB_IMAX (my_listmax, i) ;
            List [k] = i - 1 ;
        }
        // rather than create a separate per-thread boolean workspace, just
        // use a sentinel value of INT64_MIN if non-integer indices appear
        // in List_double.
        Work [tid] = my_ok ? my_listmax : INT64_MIN ;
    }

    // wrapup
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        listmax = GB_IMAX (listmax, Work [tid]) ;
        ok = ok && (Work [tid] != INT64_MIN) ;
    }

    GB_FREE_WORKSPACE ;

    (*List_max) = listmax ;
    return (ok) ;
}

//------------------------------------------------------------------------------
// GB_helper3i: convert 1-based indices to 0-based for gb_mxarray_to_list
//------------------------------------------------------------------------------

bool GB_helper3i             // return true if OK, false on error
(
    int64_t *restrict List,             // size len, output array
    const int64_t *restrict List_int64, // size len, input array
    int64_t len,
    int64_t *List_max                   // also compute the max entry in the list
)
{

    GB_NTHREADS (len) ;

    int64_t listmax = -1 ;

    GB_ALLOCATE_WORK (int64_t) ;

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t k1, k2, my_listmax = -1 ;
        GB_PARTITION (k1, k2, len, tid, nthreads) ;
        for (int64_t k = k1 ; k < k2 ; k++)
        {
            int64_t i = List_int64 [k] ;
            my_listmax = GB_IMAX (my_listmax, i) ;
            List [k] = i - 1 ;
        }
        Work [tid] = my_listmax ;
    }

    // wrapup
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        listmax = GB_IMAX (listmax, Work [tid]) ;
    }

    GB_FREE_WORKSPACE ;

    (*List_max) = listmax ;
    return (true) ;
}

//------------------------------------------------------------------------------
// GB_helper4: find the max entry in an index list for gbbuild
//------------------------------------------------------------------------------

bool GB_helper4              // return true if OK, false on error
(
    const GrB_Index *restrict I,        // array of size len
    const int64_t len,
    GrB_Index *List_max                 // find max (I) + 1
)
{

    GB_NTHREADS (len) ;

    GrB_Index listmax = 0 ;

    GB_ALLOCATE_WORK (GrB_Index) ;

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t k1, k2 ;
        GrB_Index my_listmax = 0 ;
        GB_PARTITION (k1, k2, len, tid, nthreads) ;
        for (int64_t k = k1 ; k < k2 ; k++)
        {
            my_listmax = GB_IMAX (my_listmax, I [k]) ;
        }
        Work [tid] = my_listmax ;
    }

    // wrapup
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        listmax = GB_IMAX (listmax, Work [tid]) ;
    }

    GB_FREE_WORKSPACE ;

    if (len > 0) listmax++ ;
    (*List_max) = listmax ;
    return (true) ;
}

//------------------------------------------------------------------------------
// GB_helper5: construct pattern of S for gblogassign
//------------------------------------------------------------------------------

void GB_helper5              // construct pattern of S
(
    GrB_Index *restrict Si,         // array of size anz
    GrB_Index *restrict Sj,         // array of size anz
    const GrB_Index *restrict Mi,   // array of size mnz, M->i, may be NULL
    const GrB_Index *restrict Mj,   // array of size mnz,
    const int64_t mvlen,            // M->vlen
    GrB_Index *restrict Ai,         // array of size anz, A->i, may be NULL
    const int64_t avlen,            // A->vlen (comment fixed: was "M->vlen")
    const GrB_Index anz
)
{

    GB_NTHREADS (anz) ;

    ASSERT (Mj != NULL) ;
    ASSERT (Si != NULL) ;
    ASSERT (Sj != NULL) ;

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        int64_t i = GBI (Ai, k, avlen) ;
        Si [k] = GBI (Mi, i, mvlen) ;
        Sj [k] = Mj [i] ;
    }
}

//------------------------------------------------------------------------------
// GB_helper7: Kx = uint64 (0:mnz-1), for gblogextract
//------------------------------------------------------------------------------

// TODO: use GrB_apply with a positional operator instead

void GB_helper7              // Kx = uint64 (0:mnz-1)
(
    uint64_t *restrict Kx,          // array of size mnz
    const GrB_Index mnz
)
{

    GB_NTHREADS (mnz) ;

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < mnz ; k++)
    {
        Kx [k] = k ;
    }
}

//------------------------------------------------------------------------------
// GB_helper8: expand a scalar into an array for gbbuild
//------------------------------------------------------------------------------

// TODO: use GrB_assign instead

void GB_helper8
(
    GB_void *C,         // output array of size nvals * s
    GB_void *A,         // input scalar of size s
    GrB_Index nvals,    // size of C
    size_t s            // size of each scalar
)
{

    GB_NTHREADS (nvals) ;

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < nvals ; k++)
    {
        // C [k] = A [0]
        memcpy (C + k * s, A, s) ;
    }
}

//------------------------------------------------------------------------------
// GB_helper10: compute norm (x-y,p) of two dense FP32 or FP64 vectors
//------------------------------------------------------------------------------

// p can be:

//      0 or 2:     2-norm, sqrt (sum ((x-y).^2))
//      1:          1-norm, sum (abs (x-y))
//      INT64_MAX   inf-norm, max (abs (x-y))
//      INT64_MIN   (-inf)-norm, min (abs (x-y))
//      other:      p-norm not yet computed

double GB_helper10       // norm (x-y,p), or -1 on error
(
    GB_void *x_arg,             // float or double, depending on type parameter
    bool x_iso,                 // true if x is iso
    GB_void *y_arg,             // same type as x, treat as zero if NULL
    bool y_iso,                 // true if x is iso
    GrB_Type type,              // GrB_FP32 or GrB_FP64
    int64_t p,                  // 0, 1, 2, INT64_MIN, or INT64_MAX
    GrB_Index n
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    if (!(type == GrB_FP32 || type == GrB_FP64))
    {
        // type of x and y must be GrB_FP32 or GrB_FP64
        return ((double) -1) ;
    }

    if (n == 0)
    {
        return ((double) 0) ;
    }

    //--------------------------------------------------------------------------
    // allocate workspace and determine # of threads to use
    //--------------------------------------------------------------------------

    GB_NTHREADS (n) ;

    GB_ALLOCATE_WORK (double) ;

    // X(k)/Y(k) read entry k, or entry 0 when the vector is iso-valued
    #define X(k) x [x_iso ? 0 : k]
    #define Y(k) y [y_iso ? 0 : k]

    //--------------------------------------------------------------------------
    // each thread computes its partial norm
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t k1, k2 ;
        GB_PARTITION (k1, k2, n, tid, nthreads) ;

        if (type == GrB_FP32)
        {

            //------------------------------------------------------------------
            // FP32 case
            //------------------------------------------------------------------

            float my_s = 0 ;
            const float *x = (float *) x_arg ;
            const float *y = (float *) y_arg ;
            switch (p)
            {
                case 0:     // Frobenius norm
                case 2:     // 2-norm: sqrt of sum of (x-y).^2
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            float t = X (k) ;
                            my_s += (t*t) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            float t = (X (k) - Y (k)) ;
                            my_s += (t*t) ;
                        }
                    }
                }
                break ;

                case 1:     // 1-norm: sum (abs (x-y))
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s += fabsf (X (k)) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s += fabsf (X (k) - Y (k)) ;
                        }
                    }
                }
                break ;

                case INT64_MAX:     // inf-norm: max (abs (x-y))
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmaxf (my_s, fabsf (X (k))) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmaxf (my_s, fabsf (X (k) - Y (k))) ;
                        }
                    }
                }
                break ;

                case INT64_MIN:     // (-inf)-norm: min (abs (x-y))
                {
                    my_s = INFINITY ;
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fminf (my_s, fabsf (X (k))) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fminf (my_s, fabsf (X (k) - Y (k))) ;
                        }
                    }
                }
                break ;

                default: ;  // p-norm not yet supported
            }
            Work [tid] = (double) my_s ;

        }
        else
        {

            //------------------------------------------------------------------
            // FP64 case
            //------------------------------------------------------------------

            double my_s = 0 ;
            const double *x = (double *) x_arg ;
            const double *y = (double *) y_arg ;
            switch (p)
            {
                case 0:     // Frobenius norm
                case 2:     // 2-norm: sqrt of sum of (x-y).^2
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            double t = X (k) ;
                            my_s += (t*t) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            double t = (X (k) - Y (k)) ;
                            my_s += (t*t) ;
                        }
                    }
                }
                break ;

                case 1:     // 1-norm: sum (abs (x-y))
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s += fabs (X (k)) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s += fabs (X (k) - Y (k)) ;
                        }
                    }
                }
                break ;

                case INT64_MAX:     // inf-norm: max (abs (x-y))
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmax (my_s, fabs (X (k))) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmax (my_s, fabs (X (k) - Y (k))) ;
                        }
                    }
                }
                break ;

                case INT64_MIN:     // (-inf)-norm: min (abs (x-y))
                {
                    my_s = INFINITY ;
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmin (my_s, fabs (X (k))) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmin (my_s, fabs (X (k) - Y (k))) ;
                        }
                    }
                }
                break ;

                default: ;  // p-norm not yet supported
            }
            Work [tid] = my_s ;
        }
    }

    //--------------------------------------------------------------------------
    // combine results of each thread
    //--------------------------------------------------------------------------

    double s = 0 ;
    switch (p)
    {
        case 0:     // Frobenius norm
        case 2:     // 2-norm: sqrt of sum of (x-y).^2
        {
            for (int64_t tid = 0 ; tid < nthreads ; tid++)
            {
                s += Work [tid] ;
            }
            s = sqrt (s) ;
        }
        break ;

        case 1:     // 1-norm: sum (abs (x-y))
        {
            for (int64_t tid = 0 ; tid < nthreads ; tid++)
            {
                s += Work [tid] ;
            }
        }
        break ;

        case INT64_MAX:     // inf-norm: max (abs (x-y))
        {
            for (int64_t tid = 0 ; tid < nthreads ; tid++)
            {
                s = fmax (s, Work [tid]) ;
            }
        }
        break ;

        case INT64_MIN:     // (-inf)-norm: min (abs (x-y))
        {
            s = Work [0] ;
            for (int64_t tid = 1 ; tid < nthreads ; tid++)
            {
                s = fmin (s, Work [tid]) ;
            }
        }
        break ;

        default:    // p-norm not yet supported
            s = -1 ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    return (s) ;
}
filter2.c
#include "mex.h"
#include <stdlib.h>     // calloc/free were used without this header before
#include <time.h>

// Build: mex CFLAGS='$CFLAGS -ffast-math' filter.c
//
// Benchmarks a hard-coded 8-tap FIR filter (direct convolution) over a
// synthesized ramp signal, printing the elapsed time.
//
// Inputs:
//   prhs[0] = b, numerator coefficients (only its length is used)
//   prhs[1] = a, denominator coefficients (only its length is used)
//   prhs[2] = data vector (only its length is used; x is synthesized below)
// Outputs: none (timing printed with mexPrintf)
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    mwSize n_data_samples = mxGetNumberOfElements(prhs[2]);
    mwSize b_length = mxGetNumberOfElements(prhs[0]);
    // BUG FIX: a_length previously read prhs[0] (copy/paste error);
    // the 'a' coefficient vector is prhs[1].
    mwSize a_length = mxGetNumberOfElements(prhs[1]);
    (void)a_length;   // computed for parity with filter(); not used below

    // Hard-coded FIR coefficients exercised by the benchmark loop.
    double b[8] = {0.123,0.234,0.345,0.456,0.567,0.678,0.789,0.890};

    // Synthesize the input signal x[i] = i.
    double *x = calloc(n_data_samples, sizeof(double));
    if (x == NULL)
    {
        mexPrintf("filter2: out of memory allocating x\n");
        return;
    }
    for (mwSize i = 0; i < n_data_samples; i++)
    {
        x[i] = (double)i;
    }

    double *y = calloc(n_data_samples, sizeof(double));
    if (y == NULL)
    {
        mexPrintf("filter2: out of memory allocating y\n");
        free(x);
        return;
    }

    clock_t clock_begin = clock();

    // NOTE: under OpenMP, clock() accumulates CPU time across all threads,
    // so the printed figure is CPU time, not wall-clock time.
    #pragma omp parallel for
    for (mwSize j = b_length; j < n_data_samples; j++)
    {
        // (fixed a stray unary '+' between the b[3] and b[4] terms)
        y[j] = b[0]*x[j]   + b[1]*x[j-1] + b[2]*x[j-2] + b[3]*x[j-3]
             + b[4]*x[j-4] + b[5]*x[j-5] + b[6]*x[j-6] + b[7]*x[j-7];
    }

    clock_t clock_end = clock();
    double run_time = (double)(clock_end - clock_begin) / CLOCKS_PER_SEC;
    mexPrintf("t1: %g\n", run_time);

    free(x);
    free(y);
    (void)nlhs; (void)plhs; (void)nrhs;
}
omptcb.h
// omptcb.c -- code for the interactions with the OpenMP library to verify // the behavior of various callbacks #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include <omp-tools.h> #include "chkompt.h" ompt_function_lookup_t my_lookup; ompt_set_callback_t ompt_set_callback_fn; // Address of routine to set callback ompt_get_task_info_t ompt_get_task_info_fn; // Address of routine to get task information void (*validate_ptr)(const char *) = validate; void (*ck_ra_)(const char *, int, const void *, int, char*) = ck_ra; int in_implicit_task = 0; hrtime_t starttime; // ------------------------------------------------------------------------ // inform the runtime that we will be using OMPT // This routine is automatically invoked by the OpenMP runtime at // its initialization. It tells the library where to find: // ompt_initialize -- which is invoked at the first entry to the runtime // and // ompt_finalize -- which is invoked when the runtime shuts down // ------------------------------------------------------------------------ void error_breakpoint() { } ompt_start_tool_result_t * ompt_start_tool ( unsigned int omp_version, const char *runtime_version ) { // fprintf(stderr, "ompt_start_tool invoked\n"); static ompt_start_tool_result_t result = { ompt_initialize, ompt_finalize, ompt_data_none }; return &result; } // ------------------------------------------------------------------------ // initialize upcall for OMPT // ------------------------------------------------------------------------ int ompt_initialize ( ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data ) { // fprintf(stderr, "ompt_initialize invoked\n"); my_lookup = lookup; starttime = gethrtime(); // look up the runtime entry points ompt_get_task_info_fn = (ompt_get_task_info_t) my_lookup("ompt_get_task_info"); #ifndef NO_CALLBACKS // look up two runtime entry points ompt_set_callback_fn = (ompt_set_callback_t) 
lookup("ompt_set_callback"); // register callbacks to be notified about various events register_callbacks(); #endif return 1; } // ------------------------------------------------------------------------ // finalize upcall for OMPT -- nothing to do // ------------------------------------------------------------------------ void ompt_finalize ( ompt_data_t *tool_data) { } // ------------------------------------------------------------------------ // Register the various callbacks that will be tested // ------------------------------------------------------------------------ char *cb_names[] = { "illegal callback number", //=0 "ompt_callback_thread_begin", //=1, "ompt_callback_thread_end", //=2, "ompt_callback_parallel_begin", //=3, "ompt_callback_parallel_end", //=4, "ompt_callback_task_create", //=5, "ompt_callback_task_schedule", //=6, "ompt_callback_implicit_task", //=7, "ompt_callback_target", //=8, "ompt_callback_target_data_op", //=9, "ompt_callback_target_submit", //=10, "ompt_callback_control_tool", //=11, "ompt_callback_device_initialize", //=12, "ompt_callback_device_finalize", //=13, "ompt_callback_device_load", //=14, "ompt_callback_device_unload", //=15, "ompt_callback_sync_region_wait", //=16, "ompt_callback_mutex_released", //=17, "ompt_callback_dependences", //=18, "ompt_callback_task_dependence", //=19, "ompt_callback_work", //=20, "ompt_callback_master", //=21, "ompt_callback_target_map", //=22, "ompt_callback_sync_region", //=23, "ompt_callback_lock_init", //=24, "ompt_callback_lock_destroy", //=25, "ompt_callback_mutex_acquire", //=26, "ompt_callback_mutex_acquired", //=27, "ompt_callback_nest_lock", //=28, "ompt_callback_flush", //=29, "ompt_callback_cancel", //=30, "ompt_callback_reduction", //=31, "ompt_callback_dispatch", //=32 NULL }; void register_callbacks() { int ncallbacks = 0; ompt_set_result_t ret; // Define a macro to set a callback #define SetCallback(type,name) \ ret = ompt_set_callback_fn ( type, (ompt_callback_t) name); \ if ( 
(ret == ompt_set_error) || (ret == ompt_set_never) ) { \ fprintf(stderr, " Note: %s (%2d) is never triggered in this implementation of OMPT (%d)\n", \ cb_names[type], (int)type, (int)ret ); \ } else if (ret == ompt_set_impossible) { \ fprintf(stderr, " Note: %s (%2d) is impossible in this implementation of OMPT (%d)\n", \ cb_names[type], (int)type, (int)ret ); \ } else if ( (ret == ompt_set_sometimes) || (ret == ompt_set_sometimes_paired) ) { \ fprintf(stderr, " Note: %s (%2d) may or may not be triggered in this implementation of OMPT (%d)\n", \ cb_names[type], (int)type, (int)ret ); \ } else { \ ncallbacks ++; \ } // Callback for thread begin SetCallback(ompt_callback_thread_begin,ompt_thread_begin); // Callback for thread end SetCallback(ompt_callback_thread_end, ompt_thread_end); // Callback for parallel region begin SetCallback(ompt_callback_parallel_begin, ompt_parallel_begin); // Callback for parallel region end SetCallback(ompt_callback_parallel_end, ompt_parallel_end); // Callback for task creation SetCallback(ompt_callback_task_create, ompt_task_create); // Callback for task schedule SetCallback(ompt_callback_task_schedule, ompt_task_schedule); // Callback for implicit task creation SetCallback(ompt_callback_implicit_task, ompt_implicit_task); // Callback for target SetCallback(ompt_callback_target, ompt_targetcb); // Callback for target_data_op SetCallback(ompt_callback_target_data_op, ompt_target_data_op); // Callback for target submit SetCallback(ompt_callback_target_submit, ompt_target_submit); // Callback for control_tool SetCallback(ompt_callback_control_tool, ompt_control_tool); // Callback for device_initialize SetCallback(ompt_callback_device_initialize, ompt_device_initialize); // Callback for device_finalize SetCallback(ompt_callback_device_finalize, ompt_device_finalize); // Callback for device_load SetCallback(ompt_callback_device_load, ompt_device_load); // Callback for device_unload SetCallback(ompt_callback_device_unload, 
ompt_device_unload); // Callback for synchronization region wait SetCallback(ompt_callback_sync_region_wait, ompt_sync_region_wait); // Callback for mutex released SetCallback(ompt_callback_mutex_released, ompt_mutex_released); // Callback for dependences SetCallback(ompt_callback_dependences, ompt_dependences); // Callback for task_dependence SetCallback(ompt_callback_task_dependence, ompt_task_dependence); // Callback for work entry SetCallback(ompt_callback_work, ompt_work); // Callback for master region entry SetCallback(ompt_callback_master, ompt_master); // Callback for target map SetCallback(ompt_callback_target_map, ompt_target_map); // Callback for synchronization region SetCallback(ompt_callback_sync_region, ompt_sync_region); // Callback for lock init SetCallback(ompt_callback_lock_init, ompt_lock_init); // Callback for lock_destroy SetCallback(ompt_callback_lock_destroy, ompt_lock_destroy); // Callback for mutex acquire SetCallback(ompt_callback_mutex_acquire, ompt_mutex_acquire); // Callback for mutex acquired SetCallback(ompt_callback_mutex_acquired, ompt_mutex_acquired); // Callback for nest_lock SetCallback(ompt_callback_nest_lock, ompt_nest_lock); // Callback for flush SetCallback(ompt_callback_flush, ompt_flush); // Callback for cancel SetCallback(ompt_callback_cancel, ompt_cancel); // Callback for reduction SetCallback(ompt_callback_reduction, ompt_reduction); // Callback for dispatch SetCallback(ompt_callback_dispatch, ompt_dispatch); fprintf(stderr, " %d other callbacks were set\n\n", ncallbacks); } // ------------------------------------------------------------------------ // ------------------------------------------------------------------------ // The various Callback routines // ------------------------------------------------------------------------ // ------------------------------------------------------------------------ // OMPT callback for implicit task creation // 
------------------------------------------------------------------------ void ompt_implicit_task ( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, unsigned int actual_parallelism, unsigned int index, int flags ) { // trace the callback ck_ra("implicit_task_CB", 1, (const void*)1, (int)index, (endpoint == ompt_scope_begin ? "[begin] " : "[end] ") ); if (endpoint == ompt_scope_begin) { (*validate_ptr)("implicit task begin"); in_implicit_task = 1; } else if (endpoint == ompt_scope_end) { // (*validate_ptr)("implicit task end"); // can't validate in_implicit_task = 0; } else { abort(); // no others are defined } } // ------------------------------------------------------------------------ // OMPT callback for begin // ------------------------------------------------------------------------ void ompt_thread_begin ( ompt_thread_t thread_type, ompt_data_t *thread_data ) { char *ctype = "unknown"; switch (thread_type) { case ompt_thread_initial: ctype = "[initial] "; break; case ompt_thread_worker: ctype = "[worker] "; break; case ompt_thread_other: ctype = "[other] "; break; case ompt_thread_unknown: ctype = "[unknown] "; break; } ck_ra("thread_begin_CB", 1, (const void*)1, (int)thread_type, ctype); } // ------------------------------------------------------------------------ // OMPT callback for thread end // ------------------------------------------------------------------------ void ompt_thread_end ( ompt_data_t *thread_data ) { ck_ra("thread_end_CB", 1, (const void*)1, 0, NULL); } // ------------------------------------------------------------------------ // OMPT callback for parallel region entry // ------------------------------------------------------------------------ void ompt_parallel_begin ( ompt_data_t *encountering_task_data, const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data, unsigned int requested_parallelism, int flags, const void *codeptr_ra ) { ck_ra("parallel_begin_CB", 0, codeptr_ra, 0, NULL); } 
// ------------------------------------------------------------------------
// OMPT callback for parallel region end
// ------------------------------------------------------------------------
void ompt_parallel_end (
    ompt_data_t *parallel_data,
    ompt_data_t *encountering_task_data,
    int flags,
    const void *codeptr_ra )
{
  ck_ra("parallel_end_CB", 0, codeptr_ra, 0, NULL);
}

// ------------------------------------------------------------------------
// OMPT callback for task create
// ------------------------------------------------------------------------
void ompt_task_create (
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame,
    ompt_data_t *new_task_data,
    int flags,
    int has_dependences,
    const void *codeptr_ra )
{
  char task_type[128];
  int is_initial;
  format_task_type(flags, task_type);
#if 0
  // disabled variant: the initial task gets special ckra treatment (3)
  is_initial = flags & ompt_task_initial;
  if (!is_initial) {
    ck_ra("task_create_CB", 0, codeptr_ra, 0, task_type);
  } else {
    ck_ra("task_create_CB", 3, codeptr_ra, 0, task_type);
  }
#else
  ck_ra("task_create_CB", 0, codeptr_ra, 0, task_type);
#endif
}

// ------------------------------------------------------------------------
// OMPT callback for task schedule
// ------------------------------------------------------------------------
// printable names indexed by ompt_task_status_t (valid values 1..7)
static char* ompt_task_status_t_values[] = {
  NULL,
  "[task_complete] ",        // 1
  "[task_yield] ",           // 2
  "[task_cancel] ",          // 3
  "[task_detach] ",          // 4
  "[task_early_fulfill] ",   // 5
  "[task_late_fulfill] ",    // 6
  "[task_switch] "           // 7
};

void ompt_task_schedule (
    ompt_data_t *prior_task_data,
    ompt_task_status_t prior_task_status,
    ompt_data_t *new_task_data )
{
  // ckra == 1: no codeptr_ra is supplied for this event
  ck_ra("task_schedule_CB", 1, (const void *)1, 0,
      ompt_task_status_t_values[prior_task_status] );
}

// ------------------------------------------------------------------------
// OMPT callback for ompt_target
// ------------------------------------------------------------------------
void ompt_targetcb(
    ompt_target_t kind,
    ompt_scope_endpoint_t endpoint,
    int device_num,
    ompt_id_t task_data,
    ompt_id_t target_id,
    const void *codeptr_ra )
{
  ck_ra("target_CB", 0, codeptr_ra, device_num, NULL);
}

// ------------------------------------------------------------------------
// OMPT callback for ompt_target_data_op
// ------------------------------------------------------------------------
void ompt_target_data_op(
    ompt_id_t target_id,
    ompt_id_t host_op_id,
    ompt_target_data_op_t optype,
    void *src_addr,
    int src_device_num,
    void *dest_addr,
    int dest_device_num,
    size_t bytes,
    const void *codeptr_ra )
{
  ck_ra("target data_op_CB", 0, codeptr_ra, src_device_num, NULL);
}

// ------------------------------------------------------------------------
// OMPT callback for ompt_target_submit
// ------------------------------------------------------------------------
void ompt_target_submit(
    ompt_id_t target_id,
    ompt_id_t host_op_id,
    unsigned int requested_num_teams )
{
  // ckra == 1: no codeptr_ra is supplied for this event
  ck_ra("target_submit_CB", 1, (const void *)1, target_id, NULL);
}

// ------------------------------------------------------------------------
// OMPT callback for work
// ------------------------------------------------------------------------
void ompt_work (
    ompt_work_t wstype,
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    uint64_t count,
    const void *codeptr_ra )
{
  char buffer[128];
  format_work_type(wstype, endpoint, buffer);
  ck_ra("work_CB", 0, codeptr_ra, (int)wstype, buffer);
}

// ------------------------------------------------------------------------
// OMPT callback for master
// ------------------------------------------------------------------------
void ompt_master (
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    const void *codeptr_ra )
{
  ck_ra("master_CB", 0, codeptr_ra, (int)endpoint,
      (endpoint == ompt_scope_begin ? "[begin] " : "[end] ") );
}

// ------------------------------------------------------------------------
// OMPT callback for target_map
// ------------------------------------------------------------------------
void ompt_target_map (
    ompt_id_t id,
    unsigned int nitems,
    void **host_adder,
    void **device_addr,
    size_t *bytes,
    unsigned int *mapping_flags,
    const void *codeptr_ra )
{
  ck_ra("target_map_CB", 0, codeptr_ra, (int) id, NULL);
}

// ------------------------------------------------------------------------
// OMPT callback for sync_region_wait
// ------------------------------------------------------------------------
void ompt_sync_region_wait (
    ompt_sync_region_t kind,
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    const void *codeptr_ra )
{
  char buf[128];
  format_sync_type(kind, endpoint, buf);
  // outside an implicit task the runtime may not supply codeptr_ra,
  // so relax the check (ckra == 2) in that case
  if (in_implicit_task) {
    ck_ra("sync_region_wait_CB", 0, codeptr_ra, (int) kind, buf);
  } else {
    ck_ra("sync_region_wait_CB", 2, codeptr_ra, (int) kind, buf);
  }
}

// ------------------------------------------------------------------------
// OMPT callback for sync_region
// ------------------------------------------------------------------------
void ompt_sync_region (
    ompt_sync_region_t kind,
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    const void *codeptr_ra )
{
  char buf[128];
  format_sync_type(kind, endpoint, buf);
  // same relaxation as ompt_sync_region_wait (see above)
  if (in_implicit_task) {
    ck_ra("sync_region_CB", 0, codeptr_ra, (int) kind, buf);
  } else {
    ck_ra("sync_region_CB", 2, codeptr_ra, (int) kind, buf);
  }
}

// ------------------------------------------------------------------------
// OMPT callback for lock_init
// ------------------------------------------------------------------------
void ompt_lock_init (
    ompt_mutex_t kind,
    unsigned int hint,
    unsigned int impl,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra )
{
  char buffer[128];
  format_lock_type (kind, buffer);
  ck_ra("lock_init_CB", 0, codeptr_ra, (int) kind, buffer);
}

//
// ------------------------------------------------------------------------
// OMPT callback for lock_destroy
// ------------------------------------------------------------------------
void ompt_lock_destroy (
    ompt_mutex_t kind,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra )
{
  char buffer[128];
  format_lock_type (kind, buffer);
  ck_ra("lock_destroy_CB", 0, codeptr_ra, (int) kind, buffer);
}

// ------------------------------------------------------------------------
// OMPT callback for mutex_acquire
// ------------------------------------------------------------------------
void ompt_mutex_acquire (
    ompt_mutex_t kind,
    unsigned int hint,
    unsigned int impl,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra )
{
  char buffer[128];
  format_lock_type (kind, buffer);
  ck_ra("mutex_acquire_CB", 0, codeptr_ra, (int) kind, buffer);
}

// ------------------------------------------------------------------------
// OMPT callback for mutex acquired
// ------------------------------------------------------------------------
void ompt_mutex_acquired (
    ompt_mutex_t kind,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra)
{
  char buffer[128];
  format_lock_type (kind, buffer);
  ck_ra("mutex_acquired_CB", 0, codeptr_ra, (int) kind, buffer);
}

// ------------------------------------------------------------------------
// OMPT callback for mutex released
// ------------------------------------------------------------------------
void ompt_mutex_released(
    ompt_mutex_t kind,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra)
{
  char buffer[128];
  format_lock_type (kind, buffer);
  ck_ra("mutex_released_CB", 0, codeptr_ra, (int) kind, buffer);
}

// ------------------------------------------------------------------------
// OMPT callback for dependences
// ------------------------------------------------------------------------
void ompt_dependences (
    ompt_data_t *task_data,
    const ompt_dependence_t *deps,
    int ndeps )
{
  // ckra == 1: no codeptr_ra is supplied for this event
  ck_ra("dependences_CB", 1,(const void *)1, (int) ndeps, NULL);
}

//
------------------------------------------------------------------------ // OMPT callback for task_dependence // ------------------------------------------------------------------------ void ompt_task_dependence ( ompt_data_t *src_task_data, ompt_data_t *sink_task_data ) { ck_ra("task_dependence_CB", 1,(const void *)1, 0, NULL); } // ------------------------------------------------------------------------ // OMPT callback for nest_lock // ------------------------------------------------------------------------ void ompt_nest_lock ( ompt_scope_endpoint_t endpoint, ompt_wait_id_t wait_id, const void *codeptr_ra ) { ck_ra("nest_lock_CB", 0, codeptr_ra, (int) endpoint, (endpoint == ompt_scope_begin ? "[begin] " : "[end] ") ); } // ------------------------------------------------------------------------ // OMPT callback for flush // ------------------------------------------------------------------------ void ompt_flush ( ompt_data_t *thread_data, const void *codeptr_ra ) { ck_ra("flush_CB", 0, codeptr_ra, 0, NULL); } // ------------------------------------------------------------------------ // OMPT callback for cancel // ------------------------------------------------------------------------ static char* ompt_cancel_flag_t_values[] = { "[parallel] ", "[sections] ", "[loop] ", "[taskgroup] ", "[activated] ", "[detected] ", "[discarded_task] " }; void ompt_cancel ( ompt_data_t *task_data, int flags, const void *codeptr_ra ) { ck_ra("cancel_CB", 0, codeptr_ra, 0, ompt_cancel_flag_t_values[flags] ); } // ------------------------------------------------------------------------ // OMPT callback for control_tool // ------------------------------------------------------------------------ void ompt_control_tool ( uint64_t command, uint64_t modifier, void *arg, const void *codeptr_ra ) { ck_ra("control_tool_CB", 0, codeptr_ra, 0, NULL); } // ------------------------------------------------------------------------ // OMPT callback for device_initialize // 
------------------------------------------------------------------------ void ompt_device_initialize ( int device_num, const char *type, ompt_device_t *device, ompt_function_lookup_t lookup, const char *documentation ) { ck_ra("device_initialize_CB", 1, (const void*) 1, device_num, NULL); } // ------------------------------------------------------------------------ // OMPT callback for device_finalize // ------------------------------------------------------------------------ void ompt_device_finalize( int device_num ) { ck_ra("device_finalize_CB", 1, (const void*) 1, device_num, NULL); } // ------------------------------------------------------------------------ // OMPT callback for device_load // ------------------------------------------------------------------------ void ompt_device_load( int device_num, const char *filename, int64_t offset_in_file, void *vma_in_file, size_t bytes, void *host_addr, void *device_addr, uint64_t module_id ) { ck_ra("device_load_CB", 1, (const void*) 1, device_num, NULL); } // ------------------------------------------------------------------------ // OMPT callback for device_unload // ------------------------------------------------------------------------ void ompt_device_unload ( int device_num, uint64_t module_id ) { ck_ra("device_unload_CB", 1, (const void*) 1, device_num, NULL); } // ------------------------------------------------------------------------ // OMPT callback for dispatch // ------------------------------------------------------------------------ void ompt_reduction ( ompt_sync_region_t kind, ompt_scope_endpoint_t endpoint, ompt_id_t parallel_id, ompt_id_t task_id, const void *codeptr_ra ) { ck_ra("reduction_CB", 0, codeptr_ra, (int)kind, NULL); } // ------------------------------------------------------------------------ // OMPT callback for dispatch // ------------------------------------------------------------------------ void ompt_dispatch ( ompt_data_t *parallel_data, ompt_data_t *task_data, ompt_dispatch_t 
kind, ompt_data_t instance ) { ck_ra("dispatch_CB", 1, (const void*)1, (int)kind, NULL); } // ------------------------------------------------------------------------ // format_sync_type -- convert a synchonization type and endpoint to a string // ------------------------------------------------------------------------ void format_sync_type(ompt_sync_region_t type, ompt_scope_endpoint_t endpoint, char *buffer) { char *ctype = "unknown"; char *progress = buffer; if (endpoint == ompt_scope_begin) { progress += sprintf(progress, "[begin "); } else { progress += sprintf(progress, "[end "); } switch (type) { case ompt_sync_region_barrier: ctype = "barrier"; break; case ompt_sync_region_barrier_implicit: ctype = "barrier-implicit"; break; case ompt_sync_region_barrier_explicit: ctype = "barrier-explicit"; break; case ompt_sync_region_barrier_implementation: ctype = "barrier-implementation"; break; case ompt_sync_region_taskwait: ctype = "taskwait"; break; case ompt_sync_region_taskgroup: ctype = "taskgroup"; break; case ompt_sync_region_reduction: ctype = "reduction"; break; } progress += sprintf(progress, "%s] ", ctype); } // ------------------------------------------------------------------------ // format_work_type -- convert a work type and endpoint to a string // ------------------------------------------------------------------------ void format_work_type(ompt_work_t type, ompt_scope_endpoint_t endpoint, char *buffer) { char *ctype = "unknown"; char *progress = buffer; if (endpoint == ompt_scope_begin) { progress += sprintf(progress, "[begin "); } else { progress += sprintf(progress, "[end "); } switch (type) { case ompt_work_loop: ctype = "loop"; break; case ompt_work_sections: ctype = "sections"; break; case ompt_work_single_executor: ctype = "single_executor"; break; case ompt_work_single_other: ctype = "single_other"; break; case ompt_work_workshare: ctype = "worksharc"; break; case ompt_work_distribute: ctype = "distribute"; break; case ompt_work_taskloop: 
ctype = "taskloop"; break; } progress += sprintf(progress, "%s] ", ctype); } // ------------------------------------------------------------------------ // format_task_type -- convert a task type to a string // ------------------------------------------------------------------------ void format_task_type(int type, char *buffer) { char *progress = &buffer[1]; buffer[0] = '['; if (type & ompt_task_initial) progress += sprintf(progress, "initial"); if (type & ompt_task_implicit) progress += sprintf(progress, "implicit"); if (type & ompt_task_explicit) progress += sprintf(progress, "explicit"); if (type & ompt_task_target) progress += sprintf(progress, "target"); if (type & ompt_task_undeferred) progress += sprintf(progress, "|undeferred"); if (type & ompt_task_untied) progress += sprintf(progress, "|untied"); if (type & ompt_task_final) progress += sprintf(progress, "|final"); if (type & ompt_task_mergeable) progress += sprintf(progress, "|mergeable"); if (type & ompt_task_merged) progress += sprintf(progress, "|merged"); progress += sprintf(progress, "] "); } // ------------------------------------------------------------------------ // format_lock_type -- convert a lock type to a string // ------------------------------------------------------------------------ void format_lock_type(ompt_mutex_t type, char *buffer) { char *ctype = "unknown"; switch (type) { case ompt_mutex_lock: ctype = "mutex"; break; case ompt_mutex_test_lock: ctype = "mutex_test"; break; case ompt_mutex_nest_lock: ctype = "mutex_nest"; break; case ompt_mutex_test_nest_lock: ctype = "mutex_test_nest"; break; case ompt_mutex_critical: ctype = "mutex_critical"; break; case ompt_mutex_atomic: ctype = "mutex_atomic"; break; case ompt_mutex_ordered: ctype = "mutex_ordered"; break; } sprintf(buffer, "[%s] ", ctype); } // ------------------------------------------------------------------------ // ck_ra -- invoked from various callbacks // check that the return address pointer is non-NULL // ckra 
parameter is: // 0 to report an error if NULL // 1 if address is not supplied in the callback // 2 if in the implicit task // 3 if xxxx // // ------------------------------------------------------------------------ void ck_ra(const char * type, int ckra, const void *ra, int param, char *desc) { int threadnum; char buf[512]; threadnum = omp_get_thread_num(); if ( (ckra == 0) && (ra == NULL) ) { sprintf( buf, "%25s -- ERROR -- %sthread %3d, param = %d, codeptr_ra == NULL\n", type, (desc != NULL? desc : ""), threadnum, param ); ts_write (buf); error_breakpoint(); #pragma omp atomic update nfails ++; } else { #ifdef TRACE_ALL if (ckra == 0) { sprintf( buf, "%25s OK ck_ra -- %sthread %3d, param = %d codeptr_ra = %p\n", type, (desc != NULL? desc : ""), threadnum, param, ra ); } else if (ckra == 1) { sprintf( buf, "%25s OK ck_ra -- %sthread %3d, param = %d\n", type, (desc != NULL? desc : ""), threadnum, param ); } else if (ckra == 2 ) { sprintf( buf, "%25s OK ck_ra -- %sthread %3d, param = %d codeptr_ra = %p -- in implicit task\n", type, (desc != NULL? desc : ""), threadnum, param, ra ); #if 0 } else if (ckra == 3 ) { sprintf( buf, "%25s OK ck_ra -- %sthread %3d, param = %d codeptr_ra = %p -- initial task create\n", type, (desc != NULL? 
desc : ""), threadnum, param, ra ); #endif } else { fprintf(stderr, "Ooops, INTERNAL ERROR -- invalid ckra parameter = %d\n", ckra); } ts_write (buf); #endif } } // ------------------------------------------------------------------------ // validate // delay a bit // ask for the caller's frame // check that its exit_frame pointer is non-NULL, and flag is non-zero // check that its enter_frame pointer is NULL, and flag is zero // ask for the caller's ancestors' frame // check that its exit_frame pointer is non-NULL, and flag is non-zero // delay a varying amount, depending on thread number to desynchonize the threads // ------------------------------------------------------------------------ void validate(const char *type) { int thread_num; ompt_frame_t *task_frame; ompt_frame_t *parent_task_frame; char buf[256]; #ifdef RUN_SKEW (*skew_delay_ptr)(1); #endif ompt_get_task_info_fn ( 0, // ancestor_level NULL, // flags NULL, // task_data &task_frame, NULL, // parallel_data &thread_num ); // Check for failure if (task_frame == NULL) { sprintf( buf, "%25s -- ERROR -- thread %3d task_frame = NULL\n", type, thread_num); ts_write (buf); error_breakpoint(); #pragma omp atomic update nfails ++; } else if (task_frame->exit_frame.ptr == NULL) { error_breakpoint(); sprintf( buf, "%25s -- ERROR -- thread %3d exit_frame.ptr = NULL\n", type, thread_num); ts_write (buf); #pragma omp atomic update nfails ++; } else if (task_frame->exit_frame_flags == 0) { sprintf( buf, "%25s -- ERROR -- thread %3d exit_frame.flags = 0\n", type, thread_num); ts_write (buf); error_breakpoint(); #pragma omp atomic update nfails ++; } else if (task_frame->enter_frame.ptr != NULL) { sprintf( buf, "%25s -- ERROR -- thread %3d enter_frame.ptr != NULL\n", type, thread_num); ts_write (buf); error_breakpoint(); #pragma omp atomic update nfails ++; } else if (task_frame->enter_frame_flags != 0) { sprintf( buf, "%25s -- ERROR -- thread %3d enter_frame.flags = 0x%02x != 0\n", type, thread_num, 
task_frame->enter_frame_flags); ts_write (buf); error_breakpoint(); #pragma omp atomic update nfails ++; // Now check the enter_frame for the ancestor ompt_get_task_info_fn ( 1, // ancestor_level NULL, // flags NULL, // task_data &parent_task_frame, NULL, // parallel_data &thread_num ); if (parent_task_frame == NULL) { sprintf( buf, "%25s -- ERROR -- thread %3d parent_task_frame = NULL\n", type, thread_num); ts_write (buf); error_breakpoint(); #pragma omp atomic update nfails ++; } else if (parent_task_frame->enter_frame.ptr == NULL) { sprintf( buf, "%25s -- ERROR -- thread %3d parent enter_frame.ptr = NULL\n", type, thread_num); ts_write (buf); error_breakpoint(); #pragma omp atomic update nfails ++; } else if (parent_task_frame->enter_frame_flags == 0) { sprintf( buf, "%25s -- ERROR -- thread %3d parent enter_frame_flags = 0\n", type, thread_num); ts_write (buf); error_breakpoint(); #pragma omp atomic update nfails ++; } } else { #ifdef TRACE_ALL sprintf( buf, "%25s OK return -- thread %3d exit_frame.ptr = %p flags = 0x%02x\n", type, thread_num, task_frame->exit_frame.ptr, task_frame->exit_frame_flags); ts_write (buf); #endif } #ifdef RUN_SKEW (*skew_delay_ptr)(thread_num); #endif } // ------------------------------------------------------------------------ // ts_write -- write error (or log) to stderr, with a timestamp // if NOTIMESTAMP is defined, don't write the timestamp // ------------------------------------------------------------------------ void ts_write (char *message) { hrtime_t delta; char buf[512]; int sec; int nsec; #ifdef NO_TIMESTAMPS fwrite (message, strlen(message), 1, stderr ); #else delta = gethrtime() - starttime; sec = delta / 1000000000; nsec = delta % 1000000000; sprintf(buf, "%4d.%09d: %s", sec, nsec, message); fwrite (buf, strlen(buf), 1, stderr ); #endif } hrtime_t gethrtime() { return ( (hrtime_t) (omp_get_wtime() * 1.0E09) ); }
healpix_map.h
/*
 *  This file is part of Healpix_cxx.
 *
 *  Healpix_cxx is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Healpix_cxx is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with Healpix_cxx; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 *  For more information about HEALPix, see http://healpix.jpl.nasa.gov
 */

/*
 *  Healpix_cxx is being developed at the Max-Planck-Institut fuer Astrophysik
 *  and financially supported by the Deutsches Zentrum fuer Luft- und Raumfahrt
 *  (DLR).
 */

/*! \file healpix_map.h
 *  Copyright (C) 2003, 2004, 2005, 2006 Max-Planck-Society
 *  \author Martin Reinecke
 */

#ifndef HEALPIX_MAP_H
#define HEALPIX_MAP_H

#include "arr.h"
#include "healpix_base.h"

/*! A HEALPix map of a given datatype */
template<typename T> class Healpix_Map: public Healpix_Base
  {
  private:
    arr<T> map;   /* pixel values; sized npix_ whenever the map is allocated */

  public:
    /*! Constructs an unallocated map. */
    Healpix_Map () {}
    /*! Constructs a map with a given \a order and the ordering
        scheme \a scheme. */
    Healpix_Map (int order, Healpix_Ordering_Scheme scheme)
      : Healpix_Base (order, scheme), map(npix_) {}
    /*! Constructs a map with a given \a nside and the ordering
        scheme \a scheme. */
    Healpix_Map (int nside, Healpix_Ordering_Scheme scheme, const nside_dummy)
      : Healpix_Base (nside, scheme, SET_NSIDE), map(npix_) {}
    /*! Constructs a map from the contents of \a data and sets the ordering
        scheme to \a Scheme. The size of \a data must be a valid HEALPix
        map size. */
    Healpix_Map (const arr<T> &data, Healpix_Ordering_Scheme scheme)
      : Healpix_Base (npix2nside(data.size()), scheme, SET_NSIDE), map(data) {}

    /*! Deletes the old map, creates a map from the contents of \a data and
        sets the ordering scheme to \a scheme. The size of \a data must be
        a valid HEALPix map size.
        NOTE: the pixel data are *transferred* out of \a data (see
        arr::transfer below), so \a data is emptied by this call. */
    void Set (arr<T> &data, Healpix_Ordering_Scheme scheme)
      {
      Healpix_Base::SetNside(npix2nside (data.size()), scheme);
      map.transfer(data);
      }

    /*! Deletes the old map and creates a new map  with a given \a order
        and the ordering scheme \a scheme. The pixel contents are left
        uninitialised. */
    void Set (int order, Healpix_Ordering_Scheme scheme)
      {
      Healpix_Base::Set(order, scheme);
      map.alloc(npix_);
      }

    /*! Deletes the old map and creates a new map  with a given \a nside
        and the ordering scheme \a scheme. The pixel contents are left
        uninitialised. */
    void SetNside (int nside, Healpix_Ordering_Scheme scheme)
      {
      Healpix_Base::SetNside(nside, scheme);
      map.alloc(npix_);
      }

    /*! Fills the map with \a val. */
    void fill (const T &val)
      { map.fill(val); }

    /*! Imports the map \a orig into the current map, adjusting the
        ordering scheme. \a orig must have the same resolution as the
        current map. */
    void Import_nograde (const Healpix_Map<T> &orig)
      {
      planck_assert (nside_==orig.nside_,
        "Import_nograde: maps have different nside");
      if (orig.scheme_ == scheme_)
        /* same ordering: plain element-wise copy */
        for (int m=0; m<npix_; ++m) map[m] = orig.map[m];
      else
        {
        /* different ordering: permute pixel indices while copying */
        swapfunc swapper = (scheme_ == NEST) ?
          &Healpix_Base::ring2nest : &Healpix_Base::nest2ring;
#pragma omp parallel
{
        int m;
        /* dynamic schedule: the cost of the index conversion varies with m */
#pragma omp for schedule (dynamic,5000)
        for (m=0; m<npix_; ++m) map[(this->*swapper)(m)] = orig.map[m];
}
        }
      }

    /*! Imports the map \a orig into the current map, adjusting the
        ordering scheme and the map resolution. \a orig must have higher
        resolution than the current map. */
    void Import_upgrade (const Healpix_Map<T> &orig)
      {
      planck_assert(nside_>orig.nside_,"Import_upgrade: this is no upgrade");
      int fact = nside_/orig.nside_;
      planck_assert (nside_==orig.nside_*fact,
        "the larger Nside must be a multiple of the smaller one");
      pix2xyf to_xyf = (orig.scheme_==RING) ?
        &Healpix_Map::ring2xyf : &Healpix_Map::nest2xyf;
      xyf2pix from_xyf = (scheme_==RING) ?
        &Healpix_Map::xyf2ring : &Healpix_Map::xyf2nest;

#pragma omp parallel
{
      int m;
#pragma omp for schedule (dynamic,5000)
      for (m=0; m<orig.npix_; ++m)
        {
        /* each coarse pixel m maps onto a fact x fact block of fine pixels
           in (x,y,face) coordinates; replicate its value over the block */
        int x,y,f;
        (orig.*to_xyf)(m,x,y,f);
        for (int j=fact*y; j<fact*(y+1); ++j)
          for (int i=fact*x; i<fact*(x+1); ++i)
            {
            int mypix = (this->*from_xyf)(i,j,f);
            map[mypix] = orig.map[m];
            }
        }
}
      }

    /*! Imports the map \a orig into the current map, adjusting the
        ordering scheme and the map resolution. \a orig must have higher
        resolution than the current map.
        \a pessimistic determines whether or not
        pixels are set to \a Healpix_undef when not all of the corresponding
        high-resolution pixels are defined.

        This method is instantiated for \a float and \a double only. */
    void Import_degrade (const Healpix_Map<T> &orig, bool pessimistic=false);

    /*! Imports the map \a orig into the current map, adjusting the
        ordering scheme and the map resolution if necessary.
        When downgrading, \a pessimistic determines whether or not
        pixels are set to \a Healpix_undef when not all of the
        corresponding high-resolution pixels are defined.

        This method is instantiated for \a float and \a double only. */
    void Import (const Healpix_Map<T> &orig, bool pessimistic=false)
      {
      if (orig.nside_ == nside_) // no up/degrading
        Import_nograde(orig);
      else if (orig.nside_ < nside_) // upgrading
        Import_upgrade(orig);
      else
        Import_degrade(orig,pessimistic);
      }

    /*! Returns a constant reference to the pixel with the number \a pix. */
    const T &operator[] (int pix) const { return map[pix]; }
    /*! Returns a reference to the pixel with the number \a pix. */
    T &operator[] (int pix) { return map[pix]; }

    /*! Swaps the map ordering from RING to NEST and vice versa.
        This is done in-place (i.e. with negligible space overhead). */
    void swap_scheme()
      {
      /* These tables drive the in-place permutation: for each map order,
         the inner loop below starts at every listed pixel and follows the
         ring<->nest index permutation around until it returns to the start,
         shifting values one step along the way.  The entries are presumably
         one representative pixel per permutation cycle, precomputed offline
         for orders 0..13 -- do not edit by hand. */
      static const int clen[] = { 0,7,5,4,12,10,13,18,14,19,18,17,27,21 };
      static const int cycle[][30] = {
        { },
        { 0,1,8,12,16,21,40 },
        { 0,1,2,40,114 },
        { 0,4,160,263 },
        { 0,4,30,49,51,87,526,1027,1105,1387,1807,2637 },
        { 0,8,10,18,39,74,146,307,452,4737 },
        { 0,1,2,7,9,17,80,410,1526,1921,32859,33566,38931 },
        { 0,5,6,10,12,24,27,95,372,494,924,1409,3492,4248,9137,66043,103369,
          156899 },
        { 0,1,2,3,4,45,125,351,697,24337,102940,266194,341855,419857 },
        { 0,1,2,3,9,16,1705,2082,2126,8177,12753,15410,52642,80493,83235,
          88387,99444,1675361,2495125 },
        { 0,2,6,8,9,11,20,50,93,152,183,2137,13671,44794,486954,741908,
          4803258,5692573 },
        { 0,1,5,6,44,53,470,2847,3433,4906,13654,14710,400447,1797382,
          2744492,18775974,23541521 },
        { 0,4,9,10,16,33,83,117,318,451,5759,10015,128975,171834,211256,
          347608,1278690,2154097,2590798,3427694,5581717,21012301,27023976,
          72522811,95032729,139166747,171822389 },
        { 0,5,10,267,344,363,2968,3159,9083,18437,76602,147614,1246902,
          1593138,2035574,6529391,9511830,11340287,29565945,281666026,
          677946848 } };

      swapfunc swapper = (scheme_ == NEST) ?
        &Healpix_Base::ring2nest : &Healpix_Base::nest2ring;

      /* only power-of-two Nside maps have a hierarchical (NEST) numbering */
      planck_assert (order_>=0, "swap_scheme(): need hierarchical map");

      for (int m=0; m<clen[order_]; ++m)
        {
        int istart = cycle[order_][m];

        /* rotate the values along one permutation cycle */
        T pixbuf = map[istart];
        int iold = istart, inew = (this->*swapper)(istart);
        while (inew != istart)
          {
          map[iold] = map[inew];
          iold = inew;
          inew = (this->*swapper)(inew);
          }
        map[iold] = pixbuf;
        }
      scheme_ = (scheme_==RING) ? NEST : RING;
      }

    /*! performs the actual interpolation using \a pix and \a wgt
        (bilinear combination of the 4 neighbouring pixels). */
    T interpolation (const fix_arr<int,4> &pix,
      const fix_arr<double,4> &wgt) const
      {
      return map[pix[0]]*wgt[0] + map[pix[1]]*wgt[1]
           + map[pix[2]]*wgt[2] + map[pix[3]]*wgt[3];
      }
    /*! Returns the interpolated map value at \a ptg */
    T interpolated_value (const pointing &ptg) const
      {
      fix_arr<int,4> pix;
      fix_arr<double,4> wgt;
      get_interpol (ptg, pix, wgt);
      return interpolation (pix, wgt);
      }

    /*! Returns a constant reference to the map data. */
    const arr<T> &Map() const { return map; }

    /*! Returns the minimum and maximum value of the map in
        \a Min and \a Max.

        This method is instantiated for \a float and \a double only. */
    void minmax (T &Min, T &Max) const;

    /*! Swaps the contents of two Healpix_Map objects. */
    void swap (Healpix_Map &other)
      {
      Healpix_Base::swap(other);
      map.swap(other.map);
      }

    /*! Returns the average of all defined map pixels.
        NOTE(review): if every pixel equals Healpix_undef, \a pix stays 0
        and the final division is 0/0 -- confirm callers never hit this. */
    double average() const
      {
      double avg=0;
      int pix=0;
      for (int m=0; m<npix_; ++m)
        if (!approx<double>(map[m],Healpix_undef))
          {
          ++pix;
          avg+=map[m];
          }
      return avg/pix;
      }

    /*! Adds \a val to all defined map pixels. */
    void add (T val)
      {
      for (int m=0; m<npix_; ++m)
        if (!approx<double>(map[m],Healpix_undef))
          {
          map[m]+=val;
          }
      }

    /*! Returns the root mean square of the map, not counting undefined
        pixels.
        NOTE(review): same division-by-zero caveat as average() when no
        pixel is defined. */
    double rms() const
      {
      using namespace std;

      double result=0;
      int pix=0;
      for (int m=0; m<npix_; ++m)
        if (!approx<double>(map[m],Healpix_undef))
          {
          ++pix;
          result+=map[m]*map[m];
          }
      return sqrt(result/pix);
      }
    /*! Returns the maximum absolute value in the map, ignoring undefined
        pixels. */
    T absmax() const
      {
      using namespace std;

      T result=0;
      for (int m=0; m<npix_; ++m)
        if (!approx<double>(map[m],Healpix_undef))
          {
          result = max(result,abs(map[m]));
          }
      return result;
      }
  };

#endif
pi3.c
/* * This code calculates pi using the formula to calculate * the atan(z) which is the integral from 0 to z of 1/(1+x*x) * times dx. atan(1) is 45 degrees or pi/4 * * Interestingly enough, this version disagrees with the others in * the last two decimal places shown. (floating point error * accumulation?) */ #include <stdio.h> #include <omp.h> static long num_steps = 100000; /* number of intervals */ double step; /* the size of the interval - dx */ #define NUM_THREADS 2 int main (void) { int i; /* Loop control variable */ double x; /* Actually not used */ double pi; /* final results */ double sum[NUM_THREADS]; /* Maintains partial sum for thread */ step = 1.0 / (double)num_steps; /* * This may be done more flexibly by using an environment * variable instead. */ omp_set_num_threads(NUM_THREADS); /* * Each thread executes the code below */ #pragma omp parallel { double x; /* The current x position for function evaluation */ int id; /* The identity of the thread */ id = omp_get_thread_num(); sum[id] = 0; /* * We didn't need to make i private because the pragma * below does that for us. However, it may cost us some * as more processes may be spawned */ #pragma omp for /* * Calculate the integral */ for (i = id; i < num_steps; i++) { x = (i + 0.5) * step; sum[id] += 4.0 / (1.0 + x * x); } } /* * Multiply by dx */ for (i = 0, pi = 0.0; i < NUM_THREADS; i++) { pi += sum[i] * step; } printf( "The computed value of pi is %f\n", pi); return 0; }
image_local_estimator.h
/*
 * estimator.h
 *
 *  Created on: Mar 22, 2012
 *      Author: aitor
 */

#ifndef REC_FRAMEWORK_IMAGE_LOCAL_ESTIMATOR_H_
#define REC_FRAMEWORK_IMAGE_LOCAL_ESTIMATOR_H_

#include <v4r/core/macros.h>
#include <v4r/common/faat_3d_rec_framework_defines.h>
#include <pcl/search/search.h>

/*
 * NOTE(review): every derived class in this header inherits from
 * KeypointExtractor<...>, but only ImageKeypointExtractor is declared
 * here.  Presumably KeypointExtractor lives in a sibling header that is
 * included elsewhere -- confirm, or this file cannot compile on its own.
 */
namespace v4r
{
    /* Abstract base for image keypoint detectors: holds the input cloud
       and a support radius, and lets subclasses declare whether they
       need surface normals. */
    template<typename PointT>
      class V4R_EXPORTS ImageKeypointExtractor
      {
      protected:
        typedef typename pcl::PointCloud<PointT>::Ptr PointInTPtr;
        typedef typename pcl::PointCloud<PointT>::Ptr PointOutTPtr;
        typename pcl::PointCloud<PointT>::Ptr input_;  /* cloud to detect on */
        float radius_;                                 /* support radius */

      public:
        void
        setInputCloud (PointInTPtr & input)
        {
          input_ = input;
        }

        void
        setSupportRadius (float f)
        {
          radius_ = f;
        }

        /* Run detection; subclasses fill \a keypoints. */
        virtual void
        compute (PointOutTPtr & keypoints) = 0;

        /* No-op by default; overridden by detectors that need normals. */
        virtual void
        setNormals (const pcl::PointCloud<pcl::Normal>::Ptr & /*normals*/)
        {
        }

        virtual bool
        needNormals ()
        {
          return false;
        }
      };

    /* Uniform voxel-grid subsampling detector, optionally rejecting
       keypoints whose neighbourhood is planar. */
    template<typename PointT>
      class V4R_EXPORTS UniformSamplingExtractor : public KeypointExtractor<PointT>
      {
      private:
        typedef typename pcl::PointCloud<PointT>::Ptr PointInTPtr;
        bool filter_planar_;              /* reject planar neighbourhoods? */
        using KeypointExtractor<PointT>::input_;
        using KeypointExtractor<PointT>::radius_;
        float sampling_density_;          /* voxel size for uniform sampling */
        boost::shared_ptr<std::vector<std::vector<int> > > neighborhood_indices_;
        boost::shared_ptr<std::vector<std::vector<float> > > neighborhood_dist_;

        /* Keep only the entries of kp_idx whose local neighbourhood is
           NOT planar (judged via the eigenvalues of the neighbourhood
           covariance); compacts kp_idx in place. */
        void
        filterPlanar (const PointInTPtr & input, std::vector<int> &kp_idx)
        {
          pcl::PointCloud<int> filtered_keypoints;
          //create a search object
          typename pcl::search::Search<PointT>::Ptr tree;
          if (input->isOrganized ())
            tree.reset (new pcl::search::OrganizedNeighbor<PointT> ());
          else
            tree.reset (new pcl::search::KdTree<PointT> (false));
          tree->setInputCloud (input);

          neighborhood_indices_.reset (new std::vector<std::vector<int> >);
          neighborhood_indices_->resize (kp_idx.size ());
          neighborhood_dist_.reset (new std::vector<std::vector<float> >);
          neighborhood_dist_->resize (kp_idx.size ());

          filtered_keypoints.points.resize (kp_idx.size());

          size_t kept = 0;
          for (size_t i = 0; i < kp_idx.size (); i++)
          {
            if (tree->radiusSearch (kp_idx[i], radius_, (*neighborhood_indices_)[kept], (*neighborhood_dist_)[kept]))
            {
              EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
              Eigen::Vector4f xyz_centroid;
              EIGEN_ALIGN16 Eigen::Vector3f eigenValues;
              EIGEN_ALIGN16 Eigen::Matrix3f eigenVectors;

              //compute planarity of the region
              computeMeanAndCovarianceMatrix (*input, (*neighborhood_indices_)[kept], covariance_matrix, xyz_centroid);
              pcl::eigen33 (covariance_matrix, eigenVectors, eigenValues);

              float eigsum = eigenValues.sum ();
              if (!pcl_isfinite(eigsum))
              {
                PCL_ERROR("Eigen sum is not finite\n");
              }

              /* small eigenvalue ratio => planar; keep only non-planar */
              if ((fabs (eigenValues[0] - eigenValues[1]) < 1.5e-4) || (eigsum != 0 && fabs (eigenValues[0] / eigsum) > 1.e-2))
              {
                //region is not planar, add to filtered keypoint
                kp_idx[kept] = kp_idx[i];
                kept++;
              }
            }
          }

          /* NOTE(review): the resize(kept) calls are immediately undone by
             clear(), so the cached neighbourhoods are discarded -- confirm
             whether callers were meant to reuse them. */
          neighborhood_indices_->resize (kept);
          neighborhood_dist_->resize (kept);
          kp_idx.resize (kept);

          neighborhood_indices_->clear ();
          neighborhood_dist_->clear ();
        }

      public:
        void
        setFilterPlanar (bool b)
        {
          filter_planar_ = b;
        }

        void
        setSamplingDensity (float f)
        {
          sampling_density_ = f;
        }

        /* NOTE(review): this signature does not match the pure virtual
           compute(PointOutTPtr&) in the base, and filterPlanar() is called
           with a pcl::PointCloud<int> where std::vector<int>& is declared
           -- verify against the real KeypointExtractor header. */
        void
        compute (pcl::PointCloud<PointT> & keypoints)
        {
          pcl::UniformSampling<PointT> keypoint_extractor;
          keypoint_extractor.setRadiusSearch (sampling_density_);
          keypoint_extractor.setInputCloud (input_);
          pcl::PointCloud<int> keypoints_idxes;
          keypoint_extractor.compute (keypoints_idxes);

          if (filter_planar_)
            filterPlanar (input_, keypoints_idxes);

          /* copy the selected indices out of the input cloud */
          std::vector<int> indices;
          indices.resize (keypoints_idxes.points.size ());
          for (size_t i = 0; i < indices.size (); i++)
            indices[i] = keypoints_idxes.points[i];

          pcl::copyPointCloud (*input_, indices, keypoints);
        }
      };

    /* 3-D SIFT detector operating directly on the input cloud. */
    template<typename PointInT>
      class V4R_EXPORTS SIFTKeypointExtractor : public KeypointExtractor<PointInT>
      {
        typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr;
        using KeypointExtractor<PointInT>::input_;
        using KeypointExtractor<PointInT>::radius_;

      public:
        /* NOTE(review): parameter uses PointT but this template is
           parameterized on PointInT -- this will not compile if
           instantiated; probably should read pcl::PointCloud<PointInT>. */
        void
        compute (typename pcl::PointCloud<PointT> & keypoints)
        {
          pcl::PointCloud<pcl::PointXYZI> intensity_keypoints;
          pcl::SIFTKeypoint<PointInT, pcl::PointXYZI> sift3D;
          sift3D.setScales (0.003f, 3, 2);
          sift3D.setMinimumContrast (0.1f);
          sift3D.setInputCloud (input_);
          sift3D.setSearchSurface (input_);
          sift3D.compute (intensity_keypoints);
          pcl::copyPointCloud (intensity_keypoints, keypoints);
        }
      };

    /* 3-D SIFT detector run on a PointNormal cloud assembled from the
       input points plus externally supplied normals. */
    template<typename PointInT>
      class V4R_EXPORTS SIFTSurfaceKeypointExtractor : public KeypointExtractor<PointInT>
      {
        typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr;
        pcl::PointCloud<pcl::Normal>::Ptr normals_;
        using KeypointExtractor<PointInT>::input_;
        using KeypointExtractor<PointInT>::radius_;

        bool
        needNormals ()
        {
          return true;
        }

        void
        setNormals (const pcl::PointCloud<pcl::Normal>::Ptr & normals)
        {
          normals_ = normals;
        }

      public:
        /* NOTE(review): PointT vs PointInT mismatch, as in
           SIFTKeypointExtractor above. */
        void
        compute (pcl::PointCloud<PointT> & keypoints)
        {
          if (!normals_ || (normals_->points.size () != input_->points.size ()))
            PCL_WARN("SIFTSurfaceKeypointExtractor -- Normals are not valid\n");

          /* zip points and normals into one PointNormal cloud */
          typename pcl::PointCloud<pcl::PointNormal>::Ptr input_cloud (new pcl::PointCloud<pcl::PointNormal>);
          input_cloud->width = input_->width;
          input_cloud->height = input_->height;
          input_cloud->points.resize (input_->width * input_->height);
          for (size_t i = 0; i < input_->points.size (); i++)
          {
            input_cloud->points[i].getVector3fMap () = input_->points[i].getVector3fMap ();
            input_cloud->points[i].getNormalVector3fMap () = normals_->points[i].getNormalVector3fMap ();
          }

          typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints (new pcl::PointCloud<pcl::PointXYZI>);
          pcl::SIFTKeypoint<pcl::PointNormal, pcl::PointXYZI> sift3D;
          sift3D.setScales (0.003f, 3, 2);
          sift3D.setMinimumContrast (0.0);
          sift3D.setInputCloud (input_cloud);
          sift3D.setSearchSurface (input_cloud);
          sift3D.compute (*intensity_keypoints);
          pcl::copyPointCloud (*intensity_keypoints, keypoints);
        }
      };

    /* Harris 3-D corner detector with non-maxima suppression. */
    template<typename PointInT, typename NormalT = pcl::Normal>
      class V4R_EXPORTS HarrisKeypointExtractor : public KeypointExtractor<PointInT>
      {
        pcl::PointCloud<pcl::Normal>::Ptr normals_;
        typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr;
        using KeypointExtractor<PointInT>::input_;
        using KeypointExtractor<PointInT>::radius_;
        typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m_;
        float non_max_radius_;   /* radius for non-maxima suppression */
        float threshold_;        /* minimum corner response */

      public:
        HarrisKeypointExtractor ()
        {
          m_ = pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::HARRIS;
          non_max_radius_ = 0.01f;
          threshold_ = 0.f;
        }

        bool
        needNormals ()
        {
          return true;
        }

        void
        setNormals (const pcl::PointCloud<pcl::Normal>::Ptr & normals)
        {
          normals_ = normals;
        }

        void setThreshold(float t)
        {
          threshold_ = t;
        }

        void
        setResponseMethod (typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m)
        {
          m_ = m;
        }

        void setNonMaximaRadius(float r)
        {
          non_max_radius_ = r;
        }

        void
        compute (pcl::PointCloud<PointInT> & keypoints)
        {
          if (!normals_ || (normals_->points.size () != input_->points.size ()))
            PCL_WARN("HarrisKeypointExtractor -- Normals are not valid\n");

          typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints (new pcl::PointCloud<pcl::PointXYZI>);
          pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI> harris;
          harris.setNonMaxSupression (true);
          harris.setRefine (false);
          harris.setThreshold (threshold_);
          harris.setInputCloud (input_);
          harris.setNormals (normals_);
          harris.setRadius (non_max_radius_);
          harris.setRadiusSearch (non_max_radius_);
          harris.setMethod (m_);
          harris.compute (*intensity_keypoints);
          pcl::copyPointCloud (*intensity_keypoints, keypoints);
        }
      };

    /* SUSAN corner detector (fixed 0.01 m radius). */
    template<typename PointInT, typename NormalT = pcl::Normal>
      class V4R_EXPORTS SUSANKeypointExtractor : public KeypointExtractor<PointInT>
      {
        pcl::PointCloud<pcl::Normal>::Ptr normals_;
        typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr;
        using KeypointExtractor<PointInT>::input_;
        using KeypointExtractor<PointInT>::radius_;

      public:
        SUSANKeypointExtractor ()
        {
        }

        bool
        needNormals ()
        {
          return true;
        }

        void
        setNormals (const pcl::PointCloud<pcl::Normal>::Ptr & normals)
        {
          normals_ = normals;
        }

        void
        compute (pcl::PointCloud<PointInT> & keypoints)
        {
          if (!normals_ || (normals_->points.size () != input_->points.size ()))
            PCL_WARN("SUSANKeypointExtractor -- Normals are not valid\n");

          typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints (new pcl::PointCloud<pcl::PointXYZI>);
          pcl::SUSANKeypoint<PointInT, pcl::PointXYZI> susan;
          susan.setNonMaxSupression (true);
          susan.setInputCloud (input_);
          susan.setNormals (normals_);
          susan.setRadius (0.01f);
          susan.setRadiusSearch (0.01f);
          susan.compute (*intensity_keypoints);
          pcl::copyPointCloud (*intensity_keypoints, keypoints);
        }
      };

    /* Abstract driver that runs the configured keypoint extractors over a
       cloud and (in subclasses) computes a local feature signature per
       keypoint. */
    template<typename PointInT, typename FeatureT>
      class V4R_EXPORTS LocalEstimator
      {
      protected:
        typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr;
        typedef typename pcl::PointCloud<FeatureT>::Ptr FeatureTPtr;

        typename boost::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal> > normal_estimator_;
        pcl::PointCloud<pcl::Normal>::Ptr normals_;
        std::vector<typename boost::shared_ptr<KeypointExtractor<PointInT> > > keypoint_extractor_; //this should be a vector
        float support_radius_;
        bool adaptative_MLS_;

        boost::shared_ptr<std::vector<std::vector<int> > > neighborhood_indices_;
        boost::shared_ptr<std::vector<std::vector<float> > > neighborhood_dist_;

        //std::vector< std::vector<int> > neighborhood_indices_;
        //std::vector< std::vector<float> > neighborhood_dist_;

        /* Run every configured extractor on \a cloud and accumulate the
           detected keypoints.
           NOTE(review): `*keypoints += *detected_keypoints` dereferences
           the reference parameter `keypoints` as if it were a Ptr, and
           `detected_keypoints` is a null Ptr unless the extractor
           allocates it -- verify against the real KeypointExtractor API. */
        void
        computeKeypoints (PointInTPtr & cloud, pcl::PointCloud<PointInT> & keypoints, pcl::PointCloud<pcl::Normal>::Ptr & normals)
        {
          for (size_t i = 0; i < keypoint_extractor_.size (); i++)
          {
            keypoint_extractor_[i]->setInputCloud (cloud);
            if (keypoint_extractor_[i]->needNormals ())
              keypoint_extractor_[i]->setNormals (normals);

            keypoint_extractor_[i]->setSupportRadius (support_radius_);

            PointInTPtr detected_keypoints;
            //std::vector<int> keypoint_indices;
            keypoint_extractor_[i]->compute (detected_keypoints);

            *keypoints += *detected_keypoints;
          }
        }

      public:
        LocalEstimator ()
        {
          adaptative_MLS_ = false;
          keypoint_extractor_.clear ();
        }

        void
        setAdaptativeMLS (bool b)
        {
          adaptative_MLS_ = b;
        }

        /* Subclasses implement the actual feature computation. */
        virtual bool
        estimate (const PointInTPtr & in, PointInTPtr & processed, PointInTPtr & keypoints, FeatureTPtr & signatures)=0;

        void
        setNormalEstimator (boost::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal> > & ne)
        {
          normal_estimator_ = ne;
        }

        /**
         * \brief Right now only uniformSampling keypoint extractor is allowed
         */
        void
        addKeypointExtractor (boost::shared_ptr<KeypointExtractor<PointInT> > & ke)
        {
          keypoint_extractor_.push_back (ke);
        }

        void
        setKeypointExtractors (std::vector<typename boost::shared_ptr<KeypointExtractor<PointInT> > > & ke)
        {
          keypoint_extractor_ = ke;
        }

        void
        setSupportRadius (float r)
        {
          support_radius_ = r;
        }

        virtual bool
        needNormals ()
        {
          return false;
        }

        void getNormals(pcl::PointCloud<pcl::Normal>::Ptr & normals)
        {
          normals = normals_;
        }

        /* Dead code kept for reference: an earlier in-class planarity
           filter, superseded by UniformSamplingExtractor::filterPlanar. */
        /*void setFilterPlanar (bool b)
        {
          filter_planar_ = b;
        }

        void
        filterPlanar (PointInTPtr & input, KeypointCloud & keypoints_cloud)
        {
          pcl::PointCloud<int> filtered_keypoints;
          //create a search object
          typename pcl::search::Search<PointInT>::Ptr tree;
          if (input->isOrganized ())
            tree.reset (new pcl::search::OrganizedNeighbor<PointInT> ());
          else
            tree.reset (new pcl::search::KdTree<PointInT> (false));
          tree->setInputCloud (input);

          //std::vector<int> nn_indices;
          //std::vector<float> nn_distances;

          neighborhood_indices_.reset (new std::vector<std::vector<int> >);
          neighborhood_indices_->resize (keypoints_cloud.points.size ());
          neighborhood_dist_.reset (new std::vector<std::vector<float> >);
          neighborhood_dist_->resize (keypoints_cloud.points.size ());

          filtered_keypoints.points.resize (keypoints_cloud.points.size ());
          int good = 0;

          //#pragma omp parallel for num_threads(8)
          for (size_t i = 0; i < keypoints_cloud.points.size (); i++)
          {
            if (tree->radiusSearch (keypoints_cloud[i], support_radius_, (*neighborhood_indices_)[good], (*neighborhood_dist_)[good]))
            {
              EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
              Eigen::Vector4f xyz_centroid;
              EIGEN_ALIGN16 Eigen::Vector3f eigenValues;
              EIGEN_ALIGN16 Eigen::Matrix3f eigenVectors;

              //compute planarity of the region
              computeMeanAndCovarianceMatrix (*input, (*neighborhood_indices_)[good], covariance_matrix, xyz_centroid);
              pcl::eigen33 (covariance_matrix, eigenVectors, eigenValues);

              float eigsum = eigenValues.sum ();
              if (!pcl_isfinite(eigsum))
              {
                PCL_ERROR("Eigen sum is not finite\n");
              }

              if ((fabs (eigenValues[0] - eigenValues[1]) < 1.5e-4) || (eigsum != 0 && fabs (eigenValues[0] / eigsum) > 1.e-2))
              {
                //region is not planar, add to filtered keypoint
                keypoints_cloud.points[good] = keypoints_cloud.points[i];
                good++;
              }
            }
          }

          neighborhood_indices_->resize (good);
          neighborhood_dist_->resize (good);

          keypoints_cloud.points.resize (good);
        }*/
      };
}

#endif /* REC_FRAMEWORK_IMAGE_LOCAL_ESTIMATOR_H_ */
GB_unop__identity_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_fc32)
// op(A') function:  GB (_unop_tran__identity_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input)
#define GB_OP(z, x) \
    z = x ;

// casting (none needed: A and C share the type GxB_FC32_t)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (enables the memcpy fast path in the apply kernel below)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    1

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx:       output array (may alias Ax, since the op is elementwise)
// Ax:       input array
// Ab:       bitmap of A, or NULL if A is not bitmap
// anz:      number of entries to process
// nthreads: number of OpenMP threads to use

GrB_Info GB (_unop_apply__identity_fc32_fc32)
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // A is sparse/hyper/full: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, same type: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in A
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is textually included from GB_unop_transpose.c, which
// uses the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/magick.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. 
*/ struct _ResizeFilter { double (*filter)(const double,const ResizeFilter *), (*window)(const double,const ResizeFilter *), support, /* filter region of support - the filter support limit */ window_support, /* window support, usally equal to support (expert only) */ scale, /* dimension scaling to fit window support (usally 1.0) */ blur, /* x-scale (blur-sharpen) */ coefficient[7]; /* cubic coefficents for BC-cubic filters */ ResizeWeightingFunctionType filterWeightingType, windowWeightingType; size_t signature; }; /* Forward declaractions. */ static double I0(double x), BesselOrderOne(double), Sinc(const double, const ResizeFilter *), SincFast(const double, const ResizeFilter *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F i l t e r F u n c t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % These are the various filter and windowing functions that are provided. % % They are internal to this module only. See AcquireResizeFilterInfo() for % details of the access to these functions, via the GetResizeFilterSupport() % and GetResizeFilterWeight() API interface. % % The individual filter functions have this format... % % static MagickRealtype *FilterName(const double x,const double support) % % A description of each parameter follows: % % o x: the distance from the sampling point generally in the range of 0 to % support. The GetResizeFilterWeight() ensures this a positive value. % % o resize_filter: current filter information. This allows function to % access support, and possibly other pre-calculated information defining % the functions. % */ static double Blackman(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Blackman: 2nd order cosine windowing function: 0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x) Refactored by Chantal Racette and Nicolas Robidoux to one trig call and five flops. 
 */
  const double cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+cosine*(0.5+cosine*0.16));
}

static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x) / pi.

    Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7
    flops, taking advantage of the fact that the support of Bohman is 1.0 (so
    that we know that sin(pi x) >= 0).
  */
  const double cosine=cos((double) (MagickPI*x));
  const double sine=sqrt(1.0-cosine*cosine);
  magick_unreferenced(resize_filter);
  return((1.0-x)*cosine+(1.0/MagickPI)*sine);
}

static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    A Box filter is an equal weighting function (all weights equal).
    DO NOT LIMIT results by support or resize point sampling will work
    as it requests points beyond its normal 0.0 support size.
  */
  return(1.0);
}

static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window function:
      cos((pi/2)*x).
  */
  return((double)cos((double) (MagickPI2*x)));
}

static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values:
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}

static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter.
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}

static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a sigma = 1/2 (or as user specified)

    Gaussian Formula (1D) ...
        exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))

    Gaussian Formula (2D) ...
        exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
    or for radius
        exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )

    Note that it is only a change from 1-d to radial form is in the
    normalization multiplier which is not needed or used when Gaussian is used
    as a filter.

    The constants are pre-calculated...
        coeff[0]=sigma;
        coeff[1]=1.0/(2.0*sigma^2);
        coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
        exp( -coeff[1]*(x^2)) ) * coeff[2];

    However the multiplier coeff[1] is needed, the others are informative
    only.

    This separates the gaussian 'sigma' value from the 'blur/support'
    settings allowing for its use in special 'small sigma' gaussians,
    without the filter 'missing' pixels because the support becomes too
    small.
  */
  return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}

static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Cosine window function:
      0.5+0.5*cos(pi*x).
  */
  const double cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*cosine);
}

static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Offset cosine window function:
      .54 + .46 cos(pi x).
  */
  const double cosine=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*cosine);
}

static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
    http://mathworld.wolfram.com/JincFunction.html and page 11 of
    http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf

    The original "zoom" program by Paul Heckbert called this "Bessel".  But
    really it is more accurately named "Jinc".
  */
  if (x == 0.0)
    return(0.5*MagickPI);
  return(BesselOrderOne(MagickPI*x)/x);
}

static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser Windowing Function (bessel windowing):
      I0( beta * sqrt( 1-x^2) ) / I0(0)
    Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
    However it is typically defined in terms of Alpha*PI.

    The normalization factor (coeff[1]) is not actually needed, but
    without it the filter has a large value at x=0 making it difficult to
    compare the function with other windowing functions.
  */
  return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
    sqrt((double) (1.0-x*x))));
}

static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    value;

  register ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc:
      N is the 'order' of the lagrange function and depends on
      the overall support window size of the filter. That is: for
      a support of 2, it gives a lagrange-4 (piecewise cubic function).

      "n" identifies the piece of the piecewise polynomial.

      See Survey: Interpolation Methods, IEEE Transactions on Medical
      Imaging, Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on
      p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  n=(ssize_t) (resize_filter->window_support+x);
  value=1.0f;
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}

static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.0);
}

static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) function using a trig call:
      sinc(x) == sin(pi x)/(pi x).
  */
  if (x != 0.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  return((double) 1.0);
}

static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig formula.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9)))))))); 
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}

static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett Windowing function
    for Sinc().
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter.
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.
Choose from
%  these filters:
%
%  FIR (Finite impulse Response) Filters
%      Box         Triangle   Quadratic
%      Spline      Hermite    Catrom
%      Mitchell
%
%  IIR (Infinite impulse Response) Filters
%      Gaussian     Sinc        Jinc (Bessel)
%
%  Windowed Sinc/Jinc Filters
%      Blackman    Bohman     Lanczos
%      Hann        Hamming    Cosine
%      Kaiser      Welch      Parzen
%      Bartlett
%
%  Special Purpose Filters
%      Cubic  SincFast  LanczosSharp  Lanczos2  Lanczos2Sharp
%      Robidoux RobidouxSharp
%
%  The user's "-filter" selection is used to look up the default 'expert'
%  settings for that filter from an internal table.  However any provided
%  'expert' settings (see below) may override this selection.
%
%  FIR filters are used as is, and are limited to that filter's support
%  window (unless over-ridden).  'Gaussian' while classed as an IIR filter,
%  is also simply clipped by its support size (currently 1.5 or
%  approximately 3*sigma as recommended by many references).
%
%  The special 'cylindrical' filter flag will promote the default 4-lobed
%  Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is
%  better suited to this style of image resampling.  This typically happens
%  when using such a filter for image distortions.
%
%  SPECIFIC FILTERS:
%
%  Directly requesting 'Sinc' or 'Jinc' function as a filter will force the
%  use of that function without any windowing, or promotion for cylindrical
%  usage.  This is not recommended, except by image processing experts,
%  especially as part of expert option filter function selection.
%
%  Two forms of the 'Sinc' function are available: Sinc and SincFast.  Sinc
%  is computed using the traditional sin(pi*x)/(pi*x); it is selected if
%  the user specifically specifies the use of a Sinc filter.  SincFast uses
%  highly accurate (and fast) polynomial (low Q) and rational (high Q)
%  approximations, and will be used by default in most cases.
%
%  The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter
%  (promoted to Jinc-windowed Jinc for cylindrical (Elliptical Weighted
%  Average) use).
%  The Sinc version is the most popular windowed filter.
%
%  LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form
%  of the Lanczos filter, specifically designed for EWA distortion (as a
%  Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
%  (Sinc-Sinc) filter.  The chosen blur value comes as close as possible to
%  satisfying the following condition without changing the character of the
%  corresponding EWA filter:
%
%    'No-Op' Vertical and Horizontal Line Preservation Condition: Images
%    with only vertical or horizontal features are preserved when
%    performing a 'no-op' with EWA distortion.
%
%  The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the
%  Lanczos filters.  The 'sharp' version uses a blur factor of
%  0.9549963639785485, again chosen because the resulting EWA filter comes
%  as close as possible to satisfying the above condition.
%
%  Robidoux is another filter tuned for EWA.  It is the Keys cubic filter
%  defined by B=(228 - 108 sqrt(2))/199.  Robidoux satisfies the "'No-Op'
%  Vertical and Horizontal Line Preservation Condition" exactly, and it
%  moderately blurs high frequency 'pixel-hash' patterns under no-op.  It
%  turns out to be close to both Mitchell and Lanczos2Sharp.  For example,
%  its first crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost
%  the same as the first crossing of Mitchell and Lanczos2Sharp.
%
%  RobidouxSharp is a slightly sharper version of Robidoux, some believe it
%  is too sharp.  It is designed to minimize the maximum possible change in
%  a pixel value which is at one of the extremes (e.g., 0 or 255) under
%  no-op conditions.  Amazingly Mitchell falls roughly between Robidoux and
%  RobidouxSharp, though this seems to have been pure coincidence.
%
%  'EXPERT' OPTIONS:
%
%  These artifact "defines" are not recommended for production use without
%  expert knowledge of resampling, filtering, and the effects they have on
%  the resulting resampled (resized or distorted) image.
%
%  They can be used to override any and all filter defaults, and it is
%  recommended you make good use of "filter:verbose" to make sure that the
%  overall effect of your selection (before and after) is as expected.
%
%  "filter:verbose" controls whether to output the exact results of the
%  filter selections made, as well as plotting data for graphing the
%  resulting filter over the filter's support range.
%
%  "filter:filter" selects the main function associated with this filter
%  name, as the weighting function of the filter.  This can be used to set
%  a windowing function as a weighting function, for special purposes, such
%  as graphing.
%
%  If a "filter:window" operation has not been provided, a 'Box' windowing
%  function will be set to denote that no windowing function is being used.
%
%  "filter:window" selects this windowing function for the filter.  While
%  any filter could be used as a windowing function, using the 'first lobe'
%  of that filter over the whole support window, using a non-windowing
%  function is not advisable.  If no weighting filter function is specified
%  a 'SincFast' filter is used.
%
%  "filter:lobes" sets the number of lobes to use for the Sinc/Jinc filter.
%  This is a simpler method of setting the filter support size that will
%  correctly handle the Sinc/Jinc switch for an operator's filtering
%  requirements.  Only integers should be given.
%
%  "filter:support" sets the support size for filtering to the size given.
%  This is not recommended for Sinc/Jinc windowed filters (lobes should be
%  used instead).  This will override any 'filter:lobes' option.
%
%  "filter:win-support" scales the windowing function to this size instead.
%  This causes the windowing (or self-windowing Lagrange filter) to act as
%  if the support window is much larger than what is actually supplied to
%  the calling operator.  The filter however is still clipped to the real
%  support size given, by the support range supplied to the caller.  If
%  unset this will equal the normal filter support size.
%
%  "filter:blur" scales the filter and support window by this amount.  A
%  value of > 1 will generally result in a more blurred image with more
%  ringing effects, while a value < 1 will sharpen the resulting image with
%  more aliasing effects.
%
%  "filter:sigma" is the sigma value to use for the Gaussian filter only.
%  Defaults to '1/2'.  Using a different sigma effectively provides a
%  method of using the filter as a 'blur' convolution.  Particularly when
%  using it for Distort.
%
%  "filter:b"
%  "filter:c" override the preset B,C values for a Cubic filter.  If only
%  one of these is given it is assumed to be a 'Keys' type of filter such
%  that B+2C=1, where Keys 'alpha' value = C.
%
%  Examples:
%
%  Set a true un-windowed Sinc filter with 8 lobes (very slow):
%     -define filter:filter=Sinc
%     -define filter:lobes=8
%
%  Set an 8 lobe Lanczos (Sinc or Jinc) filter:
%     -filter Lanczos
%     -define filter:lobes=8
%
%  The format of the AcquireResizeFilter method is:
%
%      ResizeFilter *AcquireResizeFilter(const Image *image,
%        const FilterType filter_type,const MagickBooleanType cylindrical,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filter: the filter type, defining a preset filter, window and
%      support.  The artifact settings listed above will override those
%      selections.
%
%    o blur: blur the filter by this amount, use 1.0 if unknown.  Image
%      artifact "filter:blur" will override this API call usage, including
%      any internal change (such as for cylindrical usage).
%
%    o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
%      filter (Jinc).
%
%    o exception: return any errors or warnings in this structure.
% */ MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image, const FilterType filter,const MagickBooleanType cylindrical, ExceptionInfo *exception) { const char *artifact; FilterType filter_type, window_type; double B, C, value; register ResizeFilter *resize_filter; /* Table Mapping given Filter, into Weighting and Windowing functions. A 'Box' windowing function means its a simble non-windowed filter. An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was specifically requested by the user. WARNING: The order of this table must match the order of the FilterType enumeration specified in "resample.h", or the filter names will not match the filter being setup. You can check filter setups with the "filter:verbose" expert setting. */ static struct { FilterType filter, window; } const mapping[SentinelFilter] = { { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */ { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */ { BoxFilter, BoxFilter }, /* Box averaging filter */ { TriangleFilter, BoxFilter }, /* Linear interpolation filter */ { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */ { SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */ { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */ { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */ { GaussianFilter, BoxFilter }, /* Gaussian blur filter */ { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */ { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */ { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */ { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */ { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */ { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */ { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */ { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */ { LanczosFilter, 
WelchFilter }, /* Welch -- parabolic (3 lobe) */ { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */ { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */ { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */ { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */ { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */ { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */ { Lanczos2Filter, Lanczos2Filter }, /* | special handling */ { Lanczos2SharpFilter, Lanczos2SharpFilter }, { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */ { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */ { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */ { SplineFilter, BoxFilter }, /* Spline Cubic Filter */ { LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */ { CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */ }; /* Table mapping the filter/window from the above table to an actual function. The default support size for that filter as a weighting function, the range to scale with to use that function as a sinc windowing function, (typ 1.0). Note that the filter_type -> function is 1 to 1 except for Sinc(), SincFast(), and CubicBC() functions, which may have multiple filter to function associations. See "filter:verbose" handling below for the function -> filter mapping. */ static struct { double (*function)(const double,const ResizeFilter*), support, /* Default lobes/support size of the weighting filter. */ scale, /* Support when function used as a windowing function Typically equal to the location of the first zero crossing. */ B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. 
*/ ResizeWeightingFunctionType weightingFunctionType; } const filters[SentinelFilter] = { /* .--- support window (if used as a Weighting Function) | .--- first crossing (if used as a Windowing Function) | | .--- B value for Cubic Function | | | .---- C value for Cubic Function | | | | */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */ { Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */ { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */ { CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */ { Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */ { Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */ { Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */ { Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */ { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */ { CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */ { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */ { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */ { Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */ { SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */ { Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */ { Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */ { Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */ { Triangle, 
1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */ { Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */ /* Robidoux: Keys cubic close to Lanczos2D sharpened */ { CubicBC, 2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction }, /* RobidouxSharp: Sharper version of Robidoux */ { CubicBC, 2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction }, { Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */ { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */ }; /* The known zero crossings of the Jinc() or more accurately the Jinc(x*PI) function being used as a filter. It is used by the "filter:lobes" expert setting and for 'lobes' for Jinc functions in the previous table. This way users do not have to deal with the highly irrational lobe sizes of the Jinc filter. Values taken from http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp using Jv-function with v=1, then dividing by PI. 
*/ static double jinc_zeros[16] = { 1.2196698912665045, 2.2331305943815286, 3.2383154841662362, 4.2410628637960699, 5.2427643768701817, 6.2439216898644877, 7.2447598687199570, 8.2453949139520427, 9.2458926849494673, 10.246293348754916, 11.246622794877883, 12.246898461138105, 13.247132522181061, 14.247333735806849, 15.247508563037300, 16.247661874700962 }; /* Allocate resize filter. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(UndefinedFilter < filter && filter < SentinelFilter); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) exception; resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter)); (void) memset(resize_filter,0,sizeof(*resize_filter)); /* Defaults for the requested filter. */ filter_type=mapping[filter].filter; window_type=mapping[filter].window; resize_filter->blur=1.0; /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */ if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) && (filter != SincFastFilter)) filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */ /* Expert filter setting override */ artifact=GetImageArtifact(image,"filter:filter"); if (IsStringTrue(artifact) != MagickFalse) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { /* Raw filter request - no window function. */ filter_type=(FilterType) option; window_type=BoxFilter; } /* Filter override with a specific window function. 
*/ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) window_type=(FilterType) option; } } else { /* Window specified, but no filter function? Assume Sinc/Jinc. */ artifact=GetImageArtifact(image,"filter:window"); if (artifact != (const char *) NULL) { ssize_t option; option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact); if ((UndefinedFilter < option) && (option < SentinelFilter)) { filter_type= cylindrical != MagickFalse ? JincFilter : SincFastFilter; window_type=(FilterType) option; } } } /* Assign the real functions to use for the filters selected. */ resize_filter->filter=filters[filter_type].function; resize_filter->support=filters[filter_type].support; resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType; resize_filter->window=filters[window_type].function; resize_filter->windowWeightingType=filters[window_type].weightingFunctionType; resize_filter->scale=filters[window_type].scale; resize_filter->signature=MagickCoreSignature; /* Filter Modifications for orthogonal/cylindrical usage */ if (cylindrical != MagickFalse) switch (filter_type) { case BoxFilter: /* Support for Cylindrical Box should be sqrt(2)/2 */ resize_filter->support=(double) MagickSQ1_2; break; case LanczosFilter: case LanczosSharpFilter: case Lanczos2Filter: case Lanczos2SharpFilter: case LanczosRadiusFilter: resize_filter->filter=filters[JincFilter].function; resize_filter->window=filters[JincFilter].function; resize_filter->scale=filters[JincFilter].scale; /* number of lobes (support window size) remain unchanged */ break; default: break; } /* Global Sharpening (regardless of orthoginal/cylindrical) */ switch (filter_type) { case LanczosSharpFilter: resize_filter->blur *= 0.9812505644269356; break; case Lanczos2SharpFilter: resize_filter->blur *= 0.9549963639785485; break; /* case 
LanczosRadius: blur adjust is done after lobes */ default: break; } /* Expert Option Modifications. */ /* User Gaussian Sigma Override - no support change */ if ((resize_filter->filter == Gaussian) || (resize_filter->window == Gaussian) ) { value=0.5; /* guassian sigma default, half pixel */ artifact=GetImageArtifact(image,"filter:sigma"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); /* Define coefficents for Gaussian */ resize_filter->coefficient[0]=value; /* note sigma too */ resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */ resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value); /* normalization - not actually needed or used! */ if ( value > 0.5 ) resize_filter->support *= 2*value; /* increase support linearly */ } /* User Kaiser Alpha Override - no support change */ if ((resize_filter->filter == Kaiser) || (resize_filter->window == Kaiser) ) { value=6.5; /* default beta value for Kaiser bessel windowing function */ artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */ if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-beta"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"filter:kaiser-alpha"); if (artifact != (const char *) NULL) value=StringToDouble(artifact,(char **) NULL)*MagickPI; /* Define coefficents for Kaiser Windowing Function */ resize_filter->coefficient[0]=value; /* alpha */ resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value)); /* normalization */ } /* Support Overrides */ artifact=GetImageArtifact(image,"filter:lobes"); if (artifact != (const char *) NULL) { ssize_t lobes; lobes=(ssize_t) StringToLong(artifact); if (lobes < 1) lobes=1; resize_filter->support=(double) lobes; } if (resize_filter->filter == Jinc) { /* Convert a Jinc function lobes value to a real support 
value. */ if (resize_filter->support > 16) resize_filter->support=jinc_zeros[15]; /* largest entry in table */ else resize_filter->support=jinc_zeros[((long) resize_filter->support)-1]; /* Blur this filter so support is a integer value (lobes dependant). */ if (filter_type == LanczosRadiusFilter) resize_filter->blur*=floor(resize_filter->support)/ resize_filter->support; } /* Expert blur override. */ artifact=GetImageArtifact(image,"filter:blur"); if (artifact != (const char *) NULL) resize_filter->blur*=StringToDouble(artifact,(char **) NULL); if (resize_filter->blur < MagickEpsilon) resize_filter->blur=(double) MagickEpsilon; /* Expert override of the support setting. */ artifact=GetImageArtifact(image,"filter:support"); if (artifact != (const char *) NULL) resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL)); /* Scale windowing function separately to the support 'clipping' window that calling operator is planning to actually use. (Expert override) */ resize_filter->window_support=resize_filter->support; /* default */ artifact=GetImageArtifact(image,"filter:win-support"); if (artifact != (const char *) NULL) resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL)); /* Adjust window function scaling to match windowing support for weighting function. This avoids a division on every filter call. */ resize_filter->scale/=resize_filter->window_support; /* * Set Cubic Spline B,C values, calculate Cubic coefficients. */ B=0.0; C=0.0; if ((resize_filter->filter == CubicBC) || (resize_filter->window == CubicBC) ) { B=filters[filter_type].B; C=filters[filter_type].C; if (filters[window_type].function == CubicBC) { B=filters[window_type].B; C=filters[window_type].C; } artifact=GetImageArtifact(image,"filter:b"); if (artifact != (const char *) NULL) { B=StringToDouble(artifact,(char **) NULL); C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. 
*/ artifact=GetImageArtifact(image,"filter:c"); /* user C override */ if (artifact != (const char *) NULL) C=StringToDouble(artifact,(char **) NULL); } else { artifact=GetImageArtifact(image,"filter:c"); if (artifact != (const char *) NULL) { C=StringToDouble(artifact,(char **) NULL); B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */ } } { const double twoB = B+B; /* Convert B,C values into Cubic Coefficents. See CubicBC(). */ resize_filter->coefficient[0]=1.0-(1.0/3.0)*B; resize_filter->coefficient[1]=-3.0+twoB+C; resize_filter->coefficient[2]=2.0-1.5*B-C; resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C; resize_filter->coefficient[4]=-8.0*C-twoB; resize_filter->coefficient[5]=B+5.0*C; resize_filter->coefficient[6]=(-1.0/6.0)*B-C; } } /* Expert Option Request for verbose details of the resulting filter. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp master { #endif if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse) { double support, x; /* Set the weighting function properly when the weighting function may not exactly match the filter of the same name. EG: a Point filter is really uses a Box weighting function with a different support than is typically used. */ if (resize_filter->filter == Box) filter_type=BoxFilter; if (resize_filter->filter == Sinc) filter_type=SincFilter; if (resize_filter->filter == SincFast) filter_type=SincFastFilter; if (resize_filter->filter == Jinc) filter_type=JincFilter; if (resize_filter->filter == CubicBC) filter_type=CubicFilter; if (resize_filter->window == Box) window_type=BoxFilter; if (resize_filter->window == Sinc) window_type=SincFilter; if (resize_filter->window == SincFast) window_type=SincFastFilter; if (resize_filter->window == Jinc) window_type=JincFilter; if (resize_filter->window == CubicBC) window_type=CubicFilter; /* Report Filter Details. 
*/ support=GetResizeFilterSupport(resize_filter); /* practical_support */ (void) FormatLocaleFile(stdout, "# Resampling Filter (for graphing)\n#\n"); (void) FormatLocaleFile(stdout,"# filter = %s\n", CommandOptionToMnemonic(MagickFilterOptions,filter_type)); (void) FormatLocaleFile(stdout,"# window = %s\n", CommandOptionToMnemonic(MagickFilterOptions,window_type)); (void) FormatLocaleFile(stdout,"# support = %.*g\n", GetMagickPrecision(),(double) resize_filter->support); (void) FormatLocaleFile(stdout,"# window-support = %.*g\n", GetMagickPrecision(),(double) resize_filter->window_support); (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n", GetMagickPrecision(),(double) resize_filter->blur); if ((filter_type == GaussianFilter) || (window_type == GaussianFilter)) (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n", GetMagickPrecision(),(double) resize_filter->coefficient[0]); if ( filter_type == KaiserFilter || window_type == KaiserFilter ) (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n", GetMagickPrecision(),(double) resize_filter->coefficient[0]); (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n", GetMagickPrecision(), (double) support); if ((filter_type == CubicFilter) || (window_type == CubicFilter)) (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n", GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C); (void) FormatLocaleFile(stdout,"\n"); /* Output values of resulting filter graph -- for graphing filter result. */ for (x=0.0; x <= support; x+=0.01f) (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x, GetMagickPrecision(),(double) GetResizeFilterWeight(resize_filter,x)); /* A final value so gnuplot can graph the 'stop' properly. 
  */
  (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
    GetMagickPrecision(),0.0);
  }
  /*
    Output the above once only for each image - remove setting.
  */
  (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e   R e s i z e   I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  This is shortcut function for a fast interpolative resize using mesh
%  interpolation.  It works well for small resizes of less than +/- 50%
%  of the original image size.  For larger resizing on images a full
%  filtered and slower resize function should be used instead.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  Image
    *resize_image;

  /* Thin wrapper: delegate to a mesh-interpolated resize. */
  resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l   O r d e r   O n e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 1 (as the name says; the j1 formulas below confirm this -- the old
%  comment incorrectly said "order 0").  This is used to create the Jinc()
%  filter function below.
%
%  Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%     j1(x) = x*j1(x);
%
%  For x in (8,inf)
%
%     j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%  where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
%     cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%             =  1/sqrt(2) * (sin(x) - cos(x))
%     sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%             = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      double BesselOrderOne(double x)
%
%  A description of each parameter follows:
%
%    o x: double value.
%
*/

/*
  I0(): zeroth-order modified Bessel function of the first kind, evaluated
  by summing the power series until terms fall below MagickEpsilon.
*/
#undef I0
static double I0(double x)
{
  double
    sum,
    t,
    y;

  register ssize_t
    i;

  /*
    Zeroth order Bessel function of the first kind.
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  /* Term i is y^(i-1)/((i-1)!)^2; stop once the term is negligible. */
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((double) i*i);
  }
  return(sum);
}

/*
  J1(): rational (polynomial-ratio) approximation used for small arguments
  (|x| < 8); coefficients evaluated by Horner's rule in x*x.
  NOTE(review): coefficients appear to come from a classic Cephes-style
  minimax fit -- do not edit them.
*/
#undef J1
static double J1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  /* Horner evaluation of both polynomials in x*x. */
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

/*
  P1(): rational approximation of the asymptotic modulus term p1(x) for
  large arguments (x >= 8); evaluated in powers of (8/x)^2.
*/
#undef P1
static double P1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

/*
  Q1(): rational approximation of the asymptotic phase term q1(x) for
  large arguments (x >= 8); evaluated in powers of (8/x)^2.
*/
#undef Q1
static double Q1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static double BesselOrderOne(double x)
{
  double
    p,
    q;

  if (x == 0.0)
    return(0.0);
  /* Remember the sign: j1(-x) = -j1(x); work with |x| below. */
  p=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  /*
    Asymptotic form for x >= 8 (see comment block above):
    sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1)), x1 = x-3*pi/4.
  */
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroy the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* Invalidate the signature before freeing to catch use-after-free. */
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   R e s i z e   F i l t e r   S u p p o r t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/

/* Accessor: the cubic B,C coefficient array (see AcquireResizeFilter). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Accessor: the blur (support scaling) factor. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Accessor: the windowing-function scale factor. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Accessor: the window-function support size. */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Accessor: which weighting function the filter uses. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Accessor: which weighting function the window uses. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Practical support: declared support enlarged by the blur factor. */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   R e s i z e   F i l t e r   W e i g h t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filters current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      double GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const double x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
*/ assert(resize_filter != (ResizeFilter *) NULL); assert(resize_filter->signature == MagickCoreSignature); x_blur=fabs((double) x)/resize_filter->blur; /* X offset with blur scaling */ if ((resize_filter->window_support < MagickEpsilon) || (resize_filter->window == Box)) scale=1.0; /* Point or Box Filter -- avoid division by zero */ else { scale=resize_filter->scale; scale=resize_filter->window(x_blur*scale,resize_filter); } weight=scale*resize_filter->filter(x_blur,resize_filter); return(weight); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p o l a t i v e R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpolativeResizeImage() resizes an image using the specified % interpolation method. % % The format of the InterpolativeResizeImage method is: % % Image *InterpolativeResizeImage(const Image *image,const size_t columns, % const size_t rows,const PixelInterpolateMethod method, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *InterpolativeResizeImage(const Image *image, const size_t columns,const size_t rows,const PixelInterpolateMethod method, ExceptionInfo *exception) { #define InterpolativeResizeImageTag "Resize/Image" CacheView *image_view, *resize_view; Image *resize_image; MagickBooleanType status; MagickOffsetType progress; PointInfo scale; ssize_t y; /* Interpolatively resize image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); scale.x=(double) image->columns/resize_image->columns; scale.y=(double) image->rows/resize_image->rows; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { PointInfo offset; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if (q == (Quantum *) NULL) continue; offset.y=((double) y+0.5)*scale.y-0.5; for (x=0; x < (ssize_t) resize_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait resize_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; offset.x=((double) x+0.5)*scale.x-0.5; 
status=InterpolatePixelChannels(image,image_view,resize_image,method, offset.x,offset.y,q,exception); if (status == MagickFalse) break; } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) resize_image=DestroyImage(resize_image); return(resize_image); } #if defined(MAGICKCORE_LQR_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i q u i d R e s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiquidRescaleImage() rescales image with seam carving. % % The format of the LiquidRescaleImage method is: % % Image *LiquidRescaleImage(const Image *image,const size_t columns, % const size_t rows,const double delta_x,const double rigidity, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the rescaled image. % % o rows: the number of rows in the rescaled image. % % o delta_x: maximum seam transversal step (0 means straight seams). % % o rigidity: introduce a bias for non-straight seams (typically 0). % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,
    *pixels;

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  register gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* Seam carving degenerates on tiny targets; fall back to a plain resize. */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /* Stage the whole image as 32-bit float samples for the LQR carver. */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Normalize quantum samples into [0,1] floats. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  /* NOTE(review): both LqrRetVal results are deliberately discarded. */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  /* The carver may deliver a size differing from the request; honor it. */
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  /* Scan carved pixels one at a time and write them back as quanta. */
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict p;

    register ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/* Stub used when the liblqr delegate is not built in: always errors. */
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  CopyPixels(): copy one pixel (all channels) from source slot to
  destination slot; offsets are in whole pixels, not quanta.
*/
static inline void CopyPixels(const Quantum *source,const ssize_t source_offset,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  register ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
    destination[channels*destination_offset+i]=source[source_offset*channels+i];
}

/*
  MixPixels(): write the per-channel integer average of 'source_size'
  source pixels (given by the source_offset array) into the destination
  slot.
*/
static inline void MixPixels(const Quantum *source,const ssize_t *source_offset,
  const size_t source_size,Quantum *destination,
  const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    sum;

  register ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
  {
    register ssize_t
      j;

    sum=0;
    for (j=0; j < (ssize_t) source_size; j++)
      sum+=source[source_offset[j]*channels+i];
    destination[channels*destination_offset+i]=(Quantum) (sum/source_size);
  }
}

/* Mix2Pixels(): convenience wrapper -- average exactly two source pixels. */
static inline void Mix2Pixels(const Quantum *source,
  const ssize_t source_offset1,const ssize_t source_offset2,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  const ssize_t
    offsets[2] = { source_offset1, source_offset2 };

  MixPixels(source,offsets,2,destination,destination_offset,channels);
}

/* PixelsEqual(): 1 if the two pixels match on every channel, else 0. */
static inline int PixelsEqual(const Quantum *source1,ssize_t offset1,
  const Quantum *source2,ssize_t offset2,const size_t channels)
{
  register ssize_t
    i;

  offset1*=channels;
  offset2*=channels;
  for (i=0; i < (ssize_t) channels; i++)
    if (source1[offset1+i] != source2[offset2+i])
      return(0);
  return(1);
}

/*
  Eagle2X(): Eagle pixel-art 2x scaler.  'pixels' is the 3x3 source
  neighborhood (slots 0..8, center 4); 'result' receives the 2x2 output
  (slots 0..3).  Each corner copies a neighbor when the two adjacent
  edge pixels agree, otherwise the center.
*/
static inline void Eagle2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    i;

  (void) source;
  /* Default every output pixel to the center of the neighborhood. */
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (PixelsEqual(pixels,0,pixels,1,channels) &&
      PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,0,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,2,channels) &&
      PixelsEqual(pixels,2,pixels,5,channels))
    CopyPixels(pixels,2,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,6,channels) &&
      PixelsEqual(pixels,6,pixels,7,channels))
    CopyPixels(pixels,6,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,8,channels) &&
      PixelsEqual(pixels,8,pixels,7,channels))
    CopyPixels(pixels,8,result,3,channels);
}

/*
  Hq2XHelper(): apply one hq2x blending rule to produce a single output
  pixel.  'rule' selects the mix recipe; e is the center of the 3x3
  neighborhood and a,b,d,f,h are its neighbors (as passed by Hq2X()).
  NOTE(review): rule numbers are assumed to follow the reference hq2x
  implementation -- confirm before renumbering.
*/
static void Hq2XHelper(const unsigned int rule,const Quantum *source,
  Quantum *destination,const ssize_t destination_offset,const size_t channels,
  const ssize_t e,const ssize_t a,const ssize_t b,const ssize_t d,
  const ssize_t f,const ssize_t h)
{
#define caseA(N,A,B,C,D) \
  case N: \
  { \
    const ssize_t \
      offsets[4] = { A, B, C, D }; \
\
    MixPixels(source,offsets,4,destination,destination_offset,channels);\
    break; \
  }
#define caseB(N,A,B,C,D,E,F,G,H) \
  case N: \
  { \
    const ssize_t \
      offsets[8] = { A, B, C, D, E, F, G, H }; \
\
    MixPixels(source,offsets,8,destination,destination_offset,channels);\
    break; \
  }

  switch (rule)
  {
    case 0:
    {
      CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    caseA(1,e,e,e,a)
    caseA(2,e,e,e,d)
    caseA(3,e,e,e,b)
    caseA(4,e,e,d,b)
    caseA(5,e,e,a,b)
    caseA(6,e,e,a,d)
    caseB(7,e,e,e,e,e,b,b,d)
    caseB(8,e,e,e,e,e,d,d,b)
    caseB(9,e,e,e,e,e,e,d,b)
    caseB(10,e,e,d,d,d,b,b,b)
    case 11:
    {
      const ssize_t
        offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

      MixPixels(source,offsets,16,destination,destination_offset,channels);
      break;
    }
    /* Rules 12..14: blend only when the b and d neighbors agree. */
    case 12:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 13:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 14:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,16,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    /* Rules 15..17: choose between a b/d blend and an 'a' corner blend. */
    case 15:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 16:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 17:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 18:
    {
      if (PixelsEqual(source,b,source,f,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, b, b, d };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, d };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    default:
    {
      if (PixelsEqual(source,d,source,h,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, d, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
  }
#undef caseA
#undef caseB
}

/*
  Hq2XPatternToNumber(): fold an 8-element 0/1 neighborhood pattern into
  an index 0..255 (pattern[0] is the most significant bit).
*/
static inline unsigned int Hq2XPatternToNumber(const int *pattern)
{
  ssize_t
    i;

  unsigned int
    result,
    order;

  result=0;
  order=1;
  for (i=7; i >= 0; i--)
  {
    result+=order*pattern[i];
    order*=2;
  }
  return(result);
}

/*
  Hq2X(): hq2x pixel-art 2x scaler.  The 3x3 neighborhood around center
  slot 4 is classified into a 256-way pattern; the table maps each
  pattern to an Hq2XHelper() blending rule.  The same table serves all
  four output pixels by rotating the pattern.
  NOTE(review): table entries are assumed copied from the reference hq2x
  rule set -- treat as opaque data.
*/
static inline void Hq2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  static const unsigned int
    Hq2XTable[] =
    {
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 12, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19, 12, 12, 5, 19, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19,  1, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6, 18, 5,  3, 16, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 13, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3,  1, 12, 5,  3,  1, 14
    };

  /* Bit i set when the center differs from neighbor (7-i). */
  const int
    pattern1[] =
    {
      !PixelsEqual(pixels,4,pixels,8,channels),
      !PixelsEqual(pixels,4,pixels,7,channels),
      !PixelsEqual(pixels,4,pixels,6,channels),
      !PixelsEqual(pixels,4,pixels,5,channels),
      !PixelsEqual(pixels,4,pixels,3,channels),
      !PixelsEqual(pixels,4,pixels,2,channels),
      !PixelsEqual(pixels,4,pixels,1,channels),
      !PixelsEqual(pixels,4,pixels,0,channels)
    };

  /* Rotate the pattern 90 degrees for each successive output corner. */
#define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5]
  const int
    pattern2[] = { Rotated(pattern1) };

  const int
    pattern3[] = { Rotated(pattern2) };

  const int
    pattern4[] = { Rotated(pattern3) };
#undef Rotated
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0,
    channels,4,0,1,3,5,7);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1,
    channels,4,2,5,1,7,3);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3,
    channels,4,8,7,5,3,1);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2,
    channels,4,6,3,7,1,5);
}

/*
  Fish2X(): "fish" pixel-art 2x scaler.  Uses the top-left 2x2 of the
  neighborhood (slots 0,1,3,4) plus per-pixel intensities to decide how
  to synthesize the bottom-right output pixel (result slot 3).
*/
static void Fish2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
#define Corner(A,B,C,D) \
  { \
    if (intensities[B] > intensities[A]) \
      { \
        ssize_t \
          offsets[3] = { B, C, D }; \
\
        MixPixels(pixels,offsets,3,result,3,channels); \
      } \
    else \
      { \
        ssize_t \
          offsets[3] = { A, B, C }; \
\
        MixPixels(pixels,offsets,3,result,3,channels); \
      } \
  }
#define Line(A,B,C,D) \
  { \
    if (intensities[C] > intensities[A]) \
      Mix2Pixels(pixels,C,D,result,3,channels); \
    else \
      Mix2Pixels(pixels,A,B,result,3,channels); \
  }

  MagickFloatType
    intensities[9];

  /* Pairwise equality flags for the 2x2 quad: a=0, b=1, d=3, e=4. */
  int
    ae,
    bd,
    ab,
    ad,
    be,
    de;

  register ssize_t
    i;

  ssize_t
    offsets[4] = { 0, 1, 3, 4 };

  for (i=0; i < 9; i++)
    intensities[i]=GetPixelIntensity(source,pixels + i*channels);
  /* Top-left output is the source pixel itself. */
  CopyPixels(pixels,0,result,0,channels);
  /* Top-right and bottom-left: darker-vs-brighter neighbor choice. */
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[1] ? 0 : 1),result,
    1,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[3] ? 0 : 3),result,
    2,channels);
  ae=PixelsEqual(pixels,0,pixels,4,channels);
  bd=PixelsEqual(pixels,1,pixels,3,channels);
  ab=PixelsEqual(pixels,0,pixels,1,channels);
  de=PixelsEqual(pixels,3,pixels,4,channels);
  ad=PixelsEqual(pixels,0,pixels,3,channels);
  be=PixelsEqual(pixels,1,pixels,4,channels);
  /* Uniform quad: just copy. */
  if (ae && bd && ab)
    {
      CopyPixels(pixels,0,result,3,channels);
      return;
    }
  /* Corner cases: three-of-four agreement patterns. */
  if (ad && de && !ab)
    {
      Corner(1,0,4,3)
      return;
    }
  if (be && de && !ab)
    {
      Corner(0,1,3,4)
      return;
    }
  if (ad && ab && !be)
    {
      Corner(4,3,1,0)
      return;
    }
  if (ab && be && !ad)
    {
      Corner(3,0,4,1)
      return;
    }
  /* Diagonal matches. */
  if (ae && (!bd || intensities[1] > intensities[0]))
    {
      Mix2Pixels(pixels,0,4,result,3,channels);
      return;
    }
  if (bd && (!ae || intensities[0] > intensities[1]))
    {
      Mix2Pixels(pixels,1,3,result,3,channels);
      return;
    }
  /* Edge (line) matches. */
  if (ab)
    {
      Line(0,1,3,4)
      return;
    }
  if (de)
    {
      Line(3,4,0,1)
      return;
    }
  if (ad)
    {
      Line(0,3,1,4)
      return;
    }
  if (be)
    {
      Line(1,4,0,3)
      return;
    }
  /* No structure detected: average the whole quad. */
  MixPixels(pixels,offsets,4,result,3,channels);
#undef Corner
#undef Line
}

/*
  Xbr2X(): xBR pixel-art 2x scaler over a 5x5 neighborhood (slots 0..24,
  center 12).  Each output corner compares two weighted edge-difference
  sums and blends toward the closer neighbor when an edge is detected.
*/
static void Xbr2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  /* w_M_N is 0 when pixels M and N are equal, 1 when they differ. */
#define WeightVar(M,N) const int w_##M##_##N = \
  PixelsEqual(pixels,M,pixels,N,channels) ? 0 : 1;

  WeightVar(12,11)
  WeightVar(12,7)
  WeightVar(12,13)
  WeightVar(12,17)
  WeightVar(12,16)
  WeightVar(12,8)
  WeightVar(6,10)
  WeightVar(6,2)
  WeightVar(11,7)
  WeightVar(11,17)
  WeightVar(11,5)
  WeightVar(7,13)
  WeightVar(7,1)
  WeightVar(12,6)
  WeightVar(12,18)
  WeightVar(8,14)
  WeightVar(8,2)
  WeightVar(13,17)
  WeightVar(13,9)
  WeightVar(7,3)
  WeightVar(16,10)
  WeightVar(16,22)
  WeightVar(17,21)
  WeightVar(11,15)
  WeightVar(18,14)
  WeightVar(18,22)
  WeightVar(17,23)
  WeightVar(17,19)
#undef WeightVar

  /* Top-left output pixel. */
  if (
    w_12_16 + w_12_8 + w_6_10 + w_6_2 + (4 * w_11_7) <
    w_11_17 + w_11_5 + w_7_13 + w_7_1 + (4 * w_12_6)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_7 ? 11 : 7),12,result,0,
      channels);
  else
    CopyPixels(pixels,12,result,0,channels);
  /* Top-right output pixel. */
  if (
    w_12_18 + w_12_6 + w_8_14 + w_8_2 + (4 * w_7_13) <
    w_13_17 + w_13_9 + w_11_7 + w_7_3 + (4 * w_12_8)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_7 <= w_12_13 ? 7 : 13),12,result,1,
      channels);
  else
    CopyPixels(pixels,12,result,1,channels);
  /* Bottom-left output pixel. */
  if (
    w_12_6 + w_12_18 + w_16_10 + w_16_22 + (4 * w_11_17) <
    w_11_7 + w_11_15 + w_13_17 + w_17_21 + (4 * w_12_16)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_17 ? 11 : 17),12,result,2,
      channels);
  else
    CopyPixels(pixels,12,result,2,channels);
  /* Bottom-right output pixel. */
  if (
    w_12_8 + w_12_16 + w_18_14 + w_18_22 + (4 * w_13_17) <
    w_11_17 + w_17_23 + w_17_19 + w_7_13 + (4 * w_12_18)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_13 <= w_12_17 ? 13 : 17),12,result,3,
      channels);
  else
    CopyPixels(pixels,12,result,3,channels);
}

/*
  Scale2X(): Scale2x (AdvMAME2x) pixel-art 2x scaler over the 3x3
  neighborhood (center 4): each output corner copies the matching edge
  neighbor when the two adjacent edges agree and the opposing edges do
  not, otherwise the center.
*/
static void Scale2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
  /* Opposing edges equal: no edge direction, replicate the center. */
  if (PixelsEqual(pixels,1,pixels,7,channels) ||
      PixelsEqual(pixels,3,pixels,5,channels))
    {
      register ssize_t
        i;

      for (i=0; i < 4; i++)
        CopyPixels(pixels,4,result,i,channels);
      return;
    }
  if (PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,3,result,0,channels);
  else
    CopyPixels(pixels,4,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,5,channels))
    CopyPixels(pixels,5,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,7,channels))
    CopyPixels(pixels,3,result,2,channels);
  else
    CopyPixels(pixels,4,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,7,channels))
    CopyPixels(pixels,5,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
}

/*
  Epbx2X(): EPX/AdvMAME-style 2x scaler with extra tie-breaking
  conditions (3x3 neighborhood, center 4).
*/
static void Epbx2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
#define HelperCond(a,b,c,d,e,f,g) ( \
  PixelsEqual(pixels,a,pixels,b,channels) && ( \
    PixelsEqual(pixels,c,pixels,d,channels) || \
    PixelsEqual(pixels,c,pixels,e,channels) || \
    PixelsEqual(pixels,a,pixels,f,channels) || \
    PixelsEqual(pixels,b,pixels,g,channels) \
    ) \
  )

  register ssize_t
    i;

  /* Default: replicate the center into all four output pixels. */
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (
    !PixelsEqual(pixels,3,pixels,5,channels) &&
    !PixelsEqual(pixels,1,pixels,7,channels) &&
    (
      PixelsEqual(pixels,4,pixels,3,channels) ||
      PixelsEqual(pixels,4,pixels,7,channels) ||
      PixelsEqual(pixels,4,pixels,5,channels) ||
      PixelsEqual(pixels,4,pixels,1,channels) ||
      (
        (
          !PixelsEqual(pixels,0,pixels,8,channels) ||
          PixelsEqual(pixels,4,pixels,6,channels) ||
          PixelsEqual(pixels,3,pixels,2,channels)
        ) &&
        (
          !PixelsEqual(pixels,6,pixels,2,channels) ||
          PixelsEqual(pixels,4,pixels,0,channels) ||
          PixelsEqual(pixels,4,pixels,8,channels)
        )
      )
    )
  )
    {
      if (HelperCond(1,3,4,0,8,2,6))
        Mix2Pixels(pixels,1,3,result,0,channels);
      if (HelperCond(5,1,4,2,6,8,0))
Mix2Pixels(pixels,5,1,result,1,channels); if (HelperCond(3,7,4,6,2,0,8)) Mix2Pixels(pixels,3,7,result,2,channels); if (HelperCond(7,5,4,8,0,6,2)) Mix2Pixels(pixels,7,5,result,3,channels); } #undef HelperCond } static inline void Eagle3X(const Image *source,const Quantum *pixels, Quantum *result,const size_t channels) { ssize_t corner_tl, corner_tr, corner_bl, corner_br; corner_tl=PixelsEqual(pixels,0,pixels,1,channels) && PixelsEqual(pixels,0,pixels,3,channels); corner_tr=PixelsEqual(pixels,1,pixels,2,channels) && PixelsEqual(pixels,2,pixels,5,channels); corner_bl=PixelsEqual(pixels,3,pixels,6,channels) && PixelsEqual(pixels,6,pixels,7,channels); corner_br=PixelsEqual(pixels,5,pixels,7,channels) && PixelsEqual(pixels,7,pixels,8,channels); CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels); if (corner_tl && corner_tr) Mix2Pixels(pixels,0,2,result,1,channels); else CopyPixels(pixels,4,result,1,channels); CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels); if (corner_tl && corner_bl) Mix2Pixels(pixels,0,6,result,3,channels); else CopyPixels(pixels,4,result,3,channels); CopyPixels(pixels,4,result,4,channels); if (corner_tr && corner_br) Mix2Pixels(pixels,2,8,result,5,channels); else CopyPixels(pixels,4,result,5,channels); CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels); if (corner_bl && corner_br) Mix2Pixels(pixels,6,8,result,7,channels); else CopyPixels(pixels,4,result,7,channels); CopyPixels(pixels,(ssize_t) (corner_br ? 
5 : 4),result,8,channels); } static inline void Eagle3XB(const Image *source,const Quantum *pixels, Quantum *result,const size_t channels) { ssize_t corner_tl, corner_tr, corner_bl, corner_br; corner_tl=PixelsEqual(pixels,0,pixels,1,channels) && PixelsEqual(pixels,0,pixels,3,channels); corner_tr=PixelsEqual(pixels,1,pixels,2,channels) && PixelsEqual(pixels,2,pixels,5,channels); corner_bl=PixelsEqual(pixels,3,pixels,6,channels) && PixelsEqual(pixels,6,pixels,7,channels); corner_br=PixelsEqual(pixels,5,pixels,7,channels) && PixelsEqual(pixels,7,pixels,8,channels); CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels); CopyPixels(pixels,4,result,1,channels); CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels); CopyPixels(pixels,4,result,3,channels); CopyPixels(pixels,4,result,4,channels); CopyPixels(pixels,4,result,5,channels); CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels); CopyPixels(pixels,4,result,7,channels); CopyPixels(pixels,(ssize_t) (corner_br ? 
5 : 4),result,8,channels); } static inline void Scale3X(const Image *source,const Quantum *pixels, Quantum *result,const size_t channels) { if (!PixelsEqual(pixels,1,pixels,7,channels) && !PixelsEqual(pixels,3,pixels,5,channels)) { if (PixelsEqual(pixels,3,pixels,1,channels)) CopyPixels(pixels,3,result,0,channels); else CopyPixels(pixels,4,result,0,channels); if ( ( PixelsEqual(pixels,3,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,2,channels) ) || ( PixelsEqual(pixels,5,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,0,channels) ) ) CopyPixels(pixels,1,result,1,channels); else CopyPixels(pixels,4,result,1,channels); if (PixelsEqual(pixels,5,pixels,1,channels)) CopyPixels(pixels,5,result,2,channels); else CopyPixels(pixels,4,result,2,channels); if ( ( PixelsEqual(pixels,3,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,6,channels) ) || ( PixelsEqual(pixels,3,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,0,channels) ) ) CopyPixels(pixels,3,result,3,channels); else CopyPixels(pixels,4,result,3,channels); CopyPixels(pixels,4,result,4,channels); if ( ( PixelsEqual(pixels,5,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,8,channels) ) || ( PixelsEqual(pixels,5,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,2,channels) ) ) CopyPixels(pixels,5,result,5,channels); else CopyPixels(pixels,4,result,5,channels); if (PixelsEqual(pixels,3,pixels,7,channels)) CopyPixels(pixels,3,result,6,channels); else CopyPixels(pixels,4,result,6,channels); if ( ( PixelsEqual(pixels,3,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,8,channels) ) || ( PixelsEqual(pixels,5,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,6,channels) ) ) CopyPixels(pixels,7,result,7,channels); else CopyPixels(pixels,4,result,7,channels); if (PixelsEqual(pixels,5,pixels,7,channels)) CopyPixels(pixels,5,result,8,channels); else CopyPixels(pixels,4,result,8,channels); } else { register ssize_t i; for (i=0; i < 9; i++) CopyPixels(pixels,4,result,i,channels); } } MagickExport Image 
*MagnifyImage(const Image *image,ExceptionInfo *exception) { #define MagnifyImageTag "Magnify/Image" CacheView *image_view, *magnify_view; const char *option; Image *source_image, *magnify_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo rectangle; ssize_t y; unsigned char magnification, width; void (*scaling_method)(const Image *,const Quantum *,Quantum *,size_t); /* Initialize magnified image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); option=GetImageOption(image->image_info,"magnify:method"); if (option == (char *) NULL) option="scale2x"; scaling_method=Scale2X; magnification=1; width=1; switch (*option) { case 'e': { if (LocaleCompare(option,"eagle2x") == 0) { scaling_method=Eagle2X; magnification=2; width=3; break; } if (LocaleCompare(option,"eagle3x") == 0) { scaling_method=Eagle3X; magnification=3; width=3; break; } if (LocaleCompare(option,"eagle3xb") == 0) { scaling_method=Eagle3XB; magnification=3; width=3; break; } if (LocaleCompare(option,"epbx2x") == 0) { scaling_method=Epbx2X; magnification=2; width=3; break; } break; } case 'f': { if (LocaleCompare(option,"fish2x") == 0) { scaling_method=Fish2X; magnification=2; width=3; break; } break; } case 'h': { if (LocaleCompare(option,"hq2x") == 0) { scaling_method=Hq2X; magnification=2; width=3; break; } break; } case 's': { if (LocaleCompare(option,"scale2x") == 0) { scaling_method=Scale2X; magnification=2; width=3; break; } if (LocaleCompare(option,"scale3x") == 0) { scaling_method=Scale3X; magnification=3; width=3; break; } break; } case 'x': { if (LocaleCompare(option,"xbr2x") == 0) { scaling_method=Xbr2X; magnification=2; width=5; } break; } default: break; } /* Make a working copy of the source image 
and convert it to RGB colorspace. */ source_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); offset.x=0; offset.y=0; rectangle.x=0; rectangle.y=0; rectangle.width=image->columns; rectangle.height=image->rows; (void) CopyImagePixels(source_image,image,&rectangle,&offset,exception); (void) SetImageColorspace(source_image,RGBColorspace,exception); magnify_image=CloneImage(source_image,magnification*source_image->columns, magnification*source_image->rows,MagickTrue,exception); if (magnify_image == (Image *) NULL) return((Image *) NULL); /* Magnify the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(source_image,exception); magnify_view=AcquireAuthenticCacheView(magnify_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,magnify_image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { Quantum r[128]; /* to hold result pixels */ register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(magnify_view,0,magnification*y, magnify_image->columns,magnification,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } /* Magnify this row of pixels. */ for (x=0; x < (ssize_t) source_image->columns; x++) { register const Quantum *magick_restrict p; size_t channels; register ssize_t i; ssize_t j; p=GetCacheViewVirtualPixels(image_view,x-width/2,y-width/2,width,width, exception); channels=GetPixelChannels(source_image); scaling_method(source_image,p,r,channels); /* Copy the result pixels into the final image. 
*/ for (j=0; j < (ssize_t) magnification; j++) for (i=0; i < (ssize_t) (channels*magnification); i++) q[j*channels*magnify_image->columns+i]=r[j*magnification*channels+i]; q+=magnification*GetPixelChannels(magnify_image); } if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } magnify_view=DestroyCacheView(magnify_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); if (status == MagickFalse) magnify_image=DestroyImage(magnify_image); return(magnify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinifyImage() is a convenience method that scales an image proportionally to % half its size. % % The format of the MinifyImage method is: % % Image *MinifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *minify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Resize to half size with a spline filter; returns NULL on failure.
    NOTE(review): a 1-pixel dimension yields columns/2 == 0, which
    ResizeImage rejects — confirm whether that is the intended behavior.
  */
  minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception);
  return(minify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResampleImage() resize image in terms of its pixel size, so that when
%  displayed at the given resolution it will be the same size in terms of
%  real world units as the original image at the original resolution.
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterType filter,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    New pixel geometry = new resolution / old resolution, rounded to the
    nearest pixel.  An unset (0.0) image resolution falls back to 72 DPI.
  */
  width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ?
    72.0 : image->resolution.x)+0.5);
  height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ?
    72.0 : image->resolution.y)+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      /* Record the resolution the new pixel geometry was computed for. */
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireFilterInfo()).
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, a image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to a Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
%        const FilterType filter,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
% */ typedef struct _ContributionInfo { double weight; ssize_t pixel; } ContributionInfo; static ContributionInfo **DestroyContributionThreadSet( ContributionInfo **contribution) { register ssize_t i; assert(contribution != (ContributionInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (contribution[i] != (ContributionInfo *) NULL) contribution[i]=(ContributionInfo *) RelinquishAlignedMemory( contribution[i]); contribution=(ContributionInfo **) RelinquishMagickMemory(contribution); return(contribution); } static ContributionInfo **AcquireContributionThreadSet(const size_t count) { register ssize_t i; ContributionInfo **contribution; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads, sizeof(*contribution)); if (contribution == (ContributionInfo **) NULL) return((ContributionInfo **) NULL); (void) memset(contribution,0,number_threads*sizeof(*contribution)); for (i=0; i < (ssize_t) number_threads; i++) { contribution[i]=(ContributionInfo *) MagickAssumeAligned( AcquireAlignedMemory(count,sizeof(**contribution))); if (contribution[i] == (ContributionInfo *) NULL) return(DestroyContributionThreadSet(contribution)); } return(contribution); } static MagickBooleanType HorizontalFilter( const ResizeFilter *magick_restrict resize_filter, const Image *magick_restrict image,Image *magick_restrict resize_image, const double x_factor,const MagickSizeType span, MagickOffsetType *magick_restrict progress,ExceptionInfo *exception) { #define ResizeImageTag "Resize/Image" CacheView *image_view, *resize_view; ClassType storage_class; ContributionInfo **magick_restrict contributions; MagickBooleanType status; double scale, support; ssize_t x; /* Apply filter to resize horizontally from image to resize image. */ scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? 
DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse) return(MagickFalse); if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. */ support=(double) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,resize_image,resize_image->columns,1) #endif for (x=0; x < (ssize_t) resize_image->columns; x++) { const int id = GetOpenMPThreadId(); double bisect, density; register const Quantum *magick_restrict p; register ContributionInfo *magick_restrict contribution; register Quantum *magick_restrict q; register ssize_t y; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(double) (x+0.5)/x_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((double) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. 
*/ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception); q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) resize_image->rows; y++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait resize_traits, traits; register ssize_t j; ssize_t k; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; if (((resize_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))) { j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-1.0)+0.5); k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j-start].pixel-contribution[0].pixel); SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i], q); continue; } pixel=0.0; if ((resize_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (j=0; j < n; j++) { k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j].pixel-contribution[0].pixel); alpha=contribution[j].weight; pixel+=alpha*p[k*GetPixelChannels(image)+i]; } SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q); continue; } /* Alpha blending. 
*/ gamma=0.0; for (j=0; j < n; j++) { k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[j].pixel-contribution[0].pixel); alpha=contribution[j].weight*QuantumScale* GetPixelAlpha(image,p+k*GetPixelChannels(image)); pixel+=alpha*p[k*GetPixelChannels(image)+i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif (*progress)++; proceed=SetImageProgress(image,ResizeImageTag,*progress,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } static MagickBooleanType VerticalFilter( const ResizeFilter *magick_restrict resize_filter, const Image *magick_restrict image,Image *magick_restrict resize_image, const double y_factor,const MagickSizeType span, MagickOffsetType *magick_restrict progress,ExceptionInfo *exception) { CacheView *image_view, *resize_view; ClassType storage_class; ContributionInfo **magick_restrict contributions; double scale, support; MagickBooleanType status; ssize_t y; /* Apply filter to resize vertically from image to resize image. */ scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0); support=scale*GetResizeFilterSupport(resize_filter); storage_class=support > 0.5 ? DirectClass : image->storage_class; if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse) return(MagickFalse); if (support < 0.5) { /* Support too small even for nearest neighbour: Reduce to point sampling. 
*/ support=(double) 0.5; scale=1.0; } contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0)); if (contributions == (ContributionInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } status=MagickTrue; scale=PerceptibleReciprocal(scale); image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { const int id = GetOpenMPThreadId(); double bisect, density; register const Quantum *magick_restrict p; register ContributionInfo *magick_restrict contribution; register Quantum *magick_restrict q; register ssize_t x; ssize_t n, start, stop; if (status == MagickFalse) continue; bisect=(double) (y+0.5)/y_factor+MagickEpsilon; start=(ssize_t) MagickMax(bisect-support+0.5,0.0); stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows); density=0.0; contribution=contributions[id]; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=GetResizeFilterWeight(resize_filter,scale* ((double) (start+n)-bisect+0.5)); density+=contribution[n].weight; } if (n == 0) continue; if ((density != 0.0) && (density != 1.0)) { register ssize_t i; /* Normalize. 
*/ density=PerceptibleReciprocal(density); for (i=0; i < n; i++) contribution[i].weight*=density; } p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel, image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1), exception); q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) resize_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait resize_traits, traits; register ssize_t j; ssize_t k; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; if (((resize_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))) { j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double) stop-1.0)+0.5); k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)* image->columns+x); SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i], q); continue; } pixel=0.0; if ((resize_traits & BlendPixelTrait) == 0) { /* No alpha blending. 
*/ for (j=0; j < n; j++) { k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[j].weight; pixel+=alpha*p[k*GetPixelChannels(image)+i]; } SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q); continue; } gamma=0.0; for (j=0; j < n; j++) { k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k* GetPixelChannels(image)); pixel+=alpha*p[k*GetPixelChannels(image)+i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif (*progress)++; proceed=SetImageProgress(image,ResizeImageTag,*progress,span); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); contributions=DestroyContributionThreadSet(contributions); return(status); } MagickExport Image *ResizeImage(const Image *image,const size_t columns, const size_t rows,const FilterType filter,ExceptionInfo *exception) { double x_factor, y_factor; FilterType filter_type; Image *filter_image, *resize_image; MagickOffsetType offset; MagickSizeType span; MagickStatusType status; ResizeFilter *resize_filter; /* Acquire resize image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter)) return(CloneImage(image,0,0,MagickTrue,exception)); /* Acquire resize filter. */ x_factor=(double) columns/(double) image->columns; y_factor=(double) rows/(double) image->rows; filter_type=LanczosFilter; if (filter != UndefinedFilter) filter_type=filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) filter_type=PointFilter; else if ((image->storage_class == PseudoClass) || (image->alpha_trait != UndefinedPixelTrait) || ((x_factor*y_factor) > 1.0)) filter_type=MitchellFilter; resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) resize_image=AccelerateResizeImage(image,columns,rows,resize_filter, exception); if (resize_image != (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } #endif resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(resize_image); } if (x_factor > y_factor) filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); else filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { resize_filter=DestroyResizeFilter(resize_filter); return(DestroyImage(resize_image)); } /* Resize image. 
*/ offset=0; if (x_factor > y_factor) { span=(MagickSizeType) (filter_image->columns+rows); status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span, &offset,exception); status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor, span,&offset,exception); } else { span=(MagickSizeType) (filter_image->rows+columns); status=VerticalFilter(resize_filter,image,filter_image,y_factor,span, &offset,exception); status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor, span,&offset,exception); } /* Free resources. */ filter_image=DestroyImage(filter_image); resize_filter=DestroyResizeFilter(resize_filter); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } resize_image->type=image->type; return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the sampled image. % % o rows: the number of rows in the sampled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SampleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" CacheView *image_view, *sample_view; Image *sample_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t x1; ssize_t *x_offset, y; PointInfo sample_offset; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Set the sampling offset, default is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x1=0; x1 < (ssize_t) sample_image->columns; x1++) x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,sample_image,sample_image->rows,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Sample each column. */ for (x=0; x < (ssize_t) sample_image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(sample_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++) { PixelChannel channel; PixelTrait image_traits, traits; channel=GetPixelChannelChannel(sample_image,i); traits=GetPixelChannelTraits(sample_image,channel); image_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (image_traits == UndefinedPixelTrait)) continue; SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels( image)+i],q); } q+=GetPixelChannels(sample_image); } if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) 
RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S c a l e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleImage() changes the size of an image to the given dimensions.
%
%  The format of the ScaleImage method is:
%
%      Image *ScaleImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  double
    alpha,
    pixel[CompositePixelChannel],
    *scale_scanline,  /* one destination-width row, x-scaled */
    *scanline,        /* one source-width row, y-scaled */
    *x_vector,        /* current source scanline (alpha-premultiplied) */
    *y_vector;        /* running y-direction accumulator */

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  PixelTrait
    scale_traits;

  PointInfo
    scale,
    span;

  register ssize_t
    i;

  ssize_t
    n,
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
    {
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.
  */
  x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*x_vector));
  /*
    When the row count is unchanged, scanline aliases x_vector (no separate
    buffer is needed); the release code at the end mirrors this condition.
  */
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
      MaxPixelChannels*sizeof(*scanline));
  scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
    MaxPixelChannels*sizeof(*scale_scanline));
  y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*y_vector));
  if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
      (x_vector == (double *) NULL) || (y_vector == (double *) NULL))
    {
      /*
        Only free scanline when it is a distinct allocation, not the alias.
      */
      if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
        scanline=(double *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (double *) NULL)
        scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
      if (x_vector != (double *) NULL)
        x_vector=(double *) RelinquishMagickMemory(x_vector);
      if (y_vector != (double *) NULL)
        y_vector=(double *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.
*/
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns*
    sizeof(*y_vector));
  n=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.
        */
        p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
          exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            break;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
            {
              p+=GetPixelChannels(image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            alpha=QuantumScale*GetPixelAlpha(image,p);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            /* blendable channels are stored alpha-premultiplied */
            if ((traits & BlendPixelTrait) == 0)
              {
                x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                continue;
              }
            x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
          }
          p+=GetPixelChannels(image);
        }
      }
    else
      {
        /*
          Scale Y direction: accumulate whole source rows into y_vector while
          the remaining output-row span (span.y) exceeds the per-source-row
          contribution (scale.y).
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
                exception);
              if (p == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                  {
                    p+=GetPixelChannels(image);
                    continue;
                  }
                if (image->alpha_trait != UndefinedPixelTrait)
                  alpha=QuantumScale*GetPixelAlpha(image,p);
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                {
                  PixelChannel channel = GetPixelChannelChannel(image,i);
                  PixelTrait traits = GetPixelChannelTraits(image,channel);
                  if ((traits & BlendPixelTrait) == 0)
                    {
                      x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                      continue;
                    }
                  x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
                }
                p+=GetPixelChannels(image);
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              y_vector[x*GetPixelChannels(image)+i]+=scale.y*
                x_vector[x*GetPixelChannels(image)+i];
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) &&
            (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
              exception);
            if (p == (const Quantum *) NULL)
              {
                status=MagickFalse;
                break;
              }
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                {
                  p+=GetPixelChannels(image);
                  continue;
                }
              if (image->alpha_trait != UndefinedPixelTrait)
                alpha=QuantumScale*GetPixelAlpha(image,p);
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                if ((traits & BlendPixelTrait) == 0)
                  {
                    x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                    continue;
                  }
                x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
              }
              p+=GetPixelChannels(image);
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Emit the accumulated row (plus the partial contribution of the
          current source row) into scanline, then reset the accumulator.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
              x_vector[x*GetPixelChannels(image)+i];
            scanline[x*GetPixelChannels(image)+i]=pixel[i];
            y_vector[x*GetPixelChannels(image)+i]=0.0;
          }
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
*/
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              /* undo the alpha premultiply applied when rows were read */
              alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
              x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    else
      {
        ssize_t
          t;

        /*
          Scale X direction: accumulate source columns of scanline into pixel
          and emit destination columns (index t) into scale_scanline.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          pixel[i]=0.0;
        next_column=MagickFalse;
        span.x=1.0;
        t=0;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                  pixel[i]=0.0;
                t++;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(image,channel);
              if (traits == UndefinedPixelTrait)
                continue;
              pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
              scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
            }
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                    pixel[i]=0.0;
                  next_column=MagickFalse;
                  t++;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
              span.x-=scale.x;
            }
        }
        /* flush any remaining partial column */
        if (span.x > 0)
          {
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
          }
        if ((next_column == MagickFalse) &&
            (t < (ssize_t) scale_image->columns))
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scale_scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
              scale_scanline[x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.
*/
  y_vector=(double *) RelinquishMagickMemory(y_vector);
  scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
  /* scanline is a distinct buffer only when the row count changed */
  if (scale_image->rows != image->rows)
    scanline=(double *) RelinquishMagickMemory(scanline);
  x_vector=(double *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T h u m b n a i l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThumbnailImage() changes the size of an image to the given dimensions and
%  removes any associated profiles.  The goal is to produce small low cost
%  thumbnail images suited for display on the Web.
%
%  The format of the ThumbnailImage method is:
%
%      Image *ThumbnailImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  /*
    For large reductions, pre-shrink with fast pixel sampling (SampleImage)
    before the filtered resize; small targets or mild reductions resize
    directly.
  */
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
*/ ResetImageProfileIterator(thumbnail_image); for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; ) { if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) { (void) DeleteImageProfile(thumbnail_image,name); ResetImageProfileIterator(thumbnail_image); } name=GetNextImageProfile(thumbnail_image); } (void) DeleteImageProperty(thumbnail_image,"comment"); (void) CopyMagickString(value,image->magick_filename,MagickPathExtent); if (strstr(image->magick_filename,"//") == (char *) NULL) (void) FormatLocaleString(value,MagickPathExtent,"file://%s", image->magick_filename); (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception); GetPathComponent(image->magick_filename,TailPath,filename); (void) CopyMagickString(value,filename,MagickPathExtent); if ( GetPathAttributes(image->filename,&attributes) != MagickFalse ) { (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double) attributes.st_mtime); (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception); } (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double) attributes.st_mtime); (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent, value); (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception); (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick); LocaleLower(value); (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception); (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL, exception); (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double) image->magick_columns); (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value, exception); (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double) image->magick_rows); (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value, exception); (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double) GetImageListLength(image)); (void) 
SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value, exception); return(thumbnail_image); }