source
stringlengths
3
92
c
stringlengths
26
2.25M
wyhash.h
/* Author: Wang Yi <godspeed_china@yeah.net> */
#ifndef wyhash_version_2
#define wyhash_version_2

#include <stdint.h>
#include <string.h>
#include <math.h>

#if defined(_MSC_VER) && defined(_M_X64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif

/* Secret constants.
 * FIX(review): declared `static` -- a plain file-scope `const` object has
 * external linkage in C, so including this header from more than one
 * translation unit produced duplicate-definition link errors. */
static const uint64_t _wyp0=0xa0761d6478bd642full, _wyp1=0xe7037ed1a0b428dbull,
                      _wyp2=0x8ebc6af09c88c6e3ull, _wyp3=0x589965cc75374cc3ull,
                      _wyp4=0x1d8e4e27c47d124full;

/* 64x64 -> 128-bit multiply, folded to 64 bits as (hi ^ lo). */
static inline uint64_t _wymum(uint64_t A, uint64_t B){
#ifdef __SIZEOF_INT128__
  __uint128_t r=A; r*=B;
  return (r>>64)^r;
#elif defined(_MSC_VER) && defined(_M_X64)
  A=_umul128(A, B, &B);
  return A^B;
#else
  /* Portable schoolbook 64x64 multiply with carry propagation. */
  uint64_t ha=A>>32, hb=B>>32, la=(uint32_t)A, lb=(uint32_t)B, hi, lo;
  uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
  lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
  return hi^lo;
#endif
}

/* Two mixing primitives keyed by different secret pairs. */
static inline uint64_t _wymix0(uint64_t A, uint64_t B, uint64_t seed){ return _wymum(A^seed^_wyp0, B^seed^_wyp1); }
static inline uint64_t _wymix1(uint64_t A, uint64_t B, uint64_t seed){ return _wymum(A^seed^_wyp2, B^seed^_wyp3); }

/* Unaligned loads via memcpy (avoids strict-aliasing / alignment UB).
 * NOTE(review): values are read in host byte order, so hashes differ
 * between little- and big-endian machines -- confirm that is acceptable. */
static inline uint64_t _wyr08(const uint8_t *p){ uint8_t  v; memcpy(&v, p, 1); return v; }
static inline uint64_t _wyr16(const uint8_t *p){ uint16_t v; memcpy(&v, p, 2); return v; }
static inline uint64_t _wyr32(const uint8_t *p){ uint32_t v; memcpy(&v, p, 4); return v; }
static inline uint64_t _wyr64(const uint8_t *p){ uint64_t v; memcpy(&v, p, 8); return v; }
/* 64-bit read composed of two 32-bit reads (used on the tail path). */
static inline uint64_t __wyr64(const uint8_t *p){ return (_wyr32(p)<<32)|_wyr32(p+4); }

/* Hash `len` bytes at `key` with `seed`.
 * To resist hash-flooding attacks, seed should be initialized as a secret. */
static inline uint64_t wyhash(const void* key, uint64_t len, uint64_t seed){
  const uint8_t *p=(const uint8_t*)key;
  uint64_t i;
  uint64_t mix = len;
  /* Bulk loop: consume full 32-byte blocks. */
  for(i=0; i+32<=len; i+=32, p+=32)
    seed=_wymix0(_wyr64(p),_wyr64(p+8),seed)^_wymix1(_wyr64(p+16),_wyr64(p+24),seed);
  /* Tail: 0..31 remaining bytes, one case per residual length. */
  switch(len&31){
  case  0: mix= len ? mix : _wymum(_wyp0,seed); break; /* len==0: fixed mix */
  case  1: seed=_wymix0(_wyr08(p),_wyp4,seed); break;
  case  2: seed=_wymix0(_wyr16(p),_wyp4,seed); break;
  case  3: seed=_wymix0((_wyr16(p)<<8)|_wyr08(p+2),_wyp4,seed); break;
  case  4: seed=_wymix0(_wyr32(p),_wyp4,seed); break;
  case  5: seed=_wymix0((_wyr32(p)<<8)|_wyr08(p+4),_wyp4,seed); break;
  case  6: seed=_wymix0((_wyr32(p)<<16)|_wyr16(p+4),_wyp4,seed); break;
  case  7: seed=_wymix0((_wyr32(p)<<24)|(_wyr16(p+4)<<8)|_wyr08(p+6),_wyp4,seed); break;
  case  8: seed=_wymix0(__wyr64(p),_wyp4,seed); break;
  case  9: seed=_wymix0(__wyr64(p),_wyr08(p+8),seed); break;
  case 10: seed=_wymix0(__wyr64(p),_wyr16(p+8),seed); break;
  case 11: seed=_wymix0(__wyr64(p),(_wyr16(p+8)<<8)|_wyr08(p+8+2),seed); break;
  case 12: seed=_wymix0(__wyr64(p),_wyr32(p+8),seed); break;
  case 13: seed=_wymix0(__wyr64(p),(_wyr32(p+8)<<8)|_wyr08(p+8+4),seed); break;
  case 14: seed=_wymix0(__wyr64(p),(_wyr32(p+8)<<16)|_wyr16(p+8+4),seed); break;
  case 15: seed=_wymix0(__wyr64(p),(_wyr32(p+8)<<24)|(_wyr16(p+8+4)<<8)|_wyr08(p+8+6),seed); break;
  case 16: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed); break;
  case 17: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(_wyr08(p+16),_wyp4,seed); break;
  case 18: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(_wyr16(p+16),_wyp4,seed); break;
  case 19: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1((_wyr16(p+16)<<8)|_wyr08(p+16+2),_wyp4,seed); break;
  case 20: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(_wyr32(p+16),_wyp4,seed); break;
  case 21: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1((_wyr32(p+16)<<8)|_wyr08(p+16+4),_wyp4,seed); break;
  case 22: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1((_wyr32(p+16)<<16)|_wyr16(p+16+4),_wyp4,seed); break;
  case 23: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1((_wyr32(p+16)<<24)|(_wyr16(p+16+4)<<8)|_wyr08(p+16+6),_wyp4,seed); break;
  case 24: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(__wyr64(p+16),_wyp4,seed); break;
  case 25: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(__wyr64(p+16),_wyr08(p+24),seed); break;
  case 26: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(__wyr64(p+16),_wyr16(p+24),seed); break;
  case 27: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(__wyr64(p+16),(_wyr16(p+24)<<8)|_wyr08(p+24+2),seed); break;
  case 28: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(__wyr64(p+16),_wyr32(p+24),seed); break;
  case 29: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(__wyr64(p+16),(_wyr32(p+24)<<8)|_wyr08(p+24+4),seed); break;
  case 30: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(__wyr64(p+16),(_wyr32(p+24)<<16)|_wyr16(p+24+4),seed); break;
  case 31: seed=_wymix0(__wyr64(p),__wyr64(p+8),seed)^_wymix1(__wyr64(p+16),(_wyr32(p+24)<<24)|(_wyr16(p+24+4)<<8)|_wyr08(p+24+6),seed); break;
  }
  return _wymum(seed^mix, _wyp4);
}

/* Hash two 64-bit words. */
static inline uint64_t wyhash64(uint64_t A, uint64_t B){ return _wymum(_wymum(A^_wyp0, B^_wyp1), _wyp2); }

/* Map a 64-bit word to a double uniform in [0,1) using the low 52 bits. */
static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r&0x000fffffffffffffull)*_wynorm; }

/* Approximate standard normal: sum of three ~21-bit uniforms, recentered. */
static inline float wy2gau(uint64_t r){ const float _wynorm1=1.0f/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+(r>>43))*_wynorm1-3.0f; }

/* PRNG with caller-owned state; advances *seed and returns the next value. */
static inline uint64_t wyrand(uint64_t *seed){ *seed+=_wyp0; return _wymum(*seed^_wyp1,*seed); }

/* Global-state PRNG.
 * FIX(review): the parameterless generator was also named `wyrand`, which
 * conflicts with wyrand(uint64_t*) above -- C has no overloading, so the
 * header failed to compile.  Renamed to wygrand(); wysrand() seeds it. */
static uint64_t _wyrand_seed=0;
#define WYRAND_MAX 0xffffffffffffffffull
static inline void wysrand(uint64_t seed){ _wyrand_seed=seed; }
static inline uint64_t wygrand(void){
  uint64_t s;
#if defined(_OPENMP)
#pragma omp atomic capture
#endif
  { _wyrand_seed += _wyp0; s = _wyrand_seed; }
  return _wymum(s^_wyp1,s);
}

#endif
dense_inplace.c
/* Copyright (c) 2016 Drew Schmidt All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ // Functions for computing covariance, (pearson) correlation, and cosine similarity #include <stdlib.h> #include <string.h> #include "utils/safeomp.h" #include "coop.h" #include "utils/fill.h" #include "utils/inverse.h" /* // O(1) storage static int coop_covar_vecvec_inplace(const int n, const double * const restrict x, const double * const restrict y, double *restrict cor) { int i; const double denom = (double) 1/n; double meanx; double meany; // :DDD double mmcp = 0.0; // minus-mean-crossproduct meanx = 0.0; meany = 0.0; PLEASE_VECTORIZE for (i=0; i<n; i++) { meanx += x[i]; meany += y[i]; } meanx *= denom; meany *= denom; PLEASE_VECTORIZE for (i=0; i<n; i++) mmcp += (x[i] - meanx) * (y[i] - meany); *cor = mmcp / ((double)(n-1)); return COOP_OK; } */ // O(m+n) storage static int co_mat_inplace(const int m, const int n, const double * const restrict x, double *restrict cov) { double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); double *means = malloc(n * sizeof(*means)); if (means==NULL) { free(vec); return -1; } const double denom_mean = (double) 1./m; const double denom_cov = (double) 1./(m-1); // get column means #pragma omp parallel for shared(means) if (m*n > OMP_MIN_SIZE) for (int j=0; j<n; j++) { const size_t mj = m*j; means[j] = 0.0; SAFE_SIMD for (int i=0; i<m; i++) means[j] += x[i + mj]; means[j] *= denom_mean; } // co-operation for (int j=0; j<n; j++) { const size_t mj = m*j; memcpy(vec, x+mj, m*sizeof(*vec)); const double meanx = means[j]; PLEASE_VECTORIZE for (int k=0; k<m; k++) vec[k] -= meanx; #pragma omp parallel for shared(j, means, vec, cov) if(m*n > OMP_MIN_SIZE) for (int i=j; i<n; i++) { const size_t mi = m*i; const double meany = means[i]; double mmcp = 0.0; SAFE_SIMD for (int k=0; k<m; k++) mmcp += vec[k] * (x[k + mi] - meany); cov[i + n*j] = mmcp * denom_cov; } } free(vec); free(means); return COOP_OK; } // --------------------------------------------- // Interface // --------------------------------------------- int 
coop_pcor_mat_inplace(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cor) { int check = co_mat_inplace(m, n, x, cor); CHECKRET(check); cosim_fill(n, cor); if (inv) { check = inv_sym_chol(n, cor); CHECKRET(check); } symmetrize(n, cor); return COOP_OK; } int coop_covar_mat_inplace(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cov) { int check = co_mat_inplace(m, n, x, cov); CHECKRET(check); if (inv) { check = inv_sym_chol(n, cov); CHECKRET(check); } symmetrize(n, cov); return COOP_OK; }
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. class PrePostActionTy { public: explicit PrePostActionTy() {} virtual void Enter(CodeGenFunction &CGF) {} virtual void Exit(CodeGenFunction &CGF) {} virtual ~PrePostActionTy() {} }; /// Class provides a way to call simple version of codegen for OpenMP region, or /// an advanced with possible pre|post-actions in codegen. 
class RegionCodeGenTy final { intptr_t CodeGen; typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &); CodeGenTy Callback; mutable PrePostActionTy *PrePostAction; RegionCodeGenTy() = delete; RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete; template <typename Callable> static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF, PrePostActionTy &Action) { return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action); } public: template <typename Callable> RegionCodeGenTy( Callable &&CodeGen, typename std::enable_if< !std::is_same<typename std::remove_reference<Callable>::type, RegionCodeGenTy>::value>::type * = nullptr) : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)), Callback(CallbackFn<typename std::remove_reference<Callable>::type>), PrePostAction(nullptr) {} void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; } void operator()(CodeGenFunction &CGF) const; }; struct OMPTaskDataTy final { SmallVector<const Expr *, 4> PrivateVars; SmallVector<const Expr *, 4> PrivateCopies; SmallVector<const Expr *, 4> FirstprivateVars; SmallVector<const Expr *, 4> FirstprivateCopies; SmallVector<const Expr *, 4> FirstprivateInits; SmallVector<const Expr *, 4> LastprivateVars; SmallVector<const Expr *, 4> LastprivateCopies; SmallVector<const Expr *, 4> ReductionVars; SmallVector<const Expr *, 4> ReductionCopies; SmallVector<const Expr *, 4> ReductionOps; SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4> Dependences; llvm::PointerIntPair<llvm::Value *, 1, bool> Final; llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule; llvm::PointerIntPair<llvm::Value *, 1, bool> Priority; llvm::Value *Reductions = nullptr; unsigned NumberOfParts = 0; bool Tied = true; bool Nogroup = false; }; /// Class intended to support codegen of all kind of the reduction clauses. class ReductionCodeGen { private: /// Data required for codegen of reduction clauses. struct ReductionData { /// Reference to the original shared item. 
const Expr *Ref = nullptr; /// Helper expression for generation of private copy. const Expr *Private = nullptr; /// Helper expression for generation reduction operation. const Expr *ReductionOp = nullptr; ReductionData(const Expr *Ref, const Expr *Private, const Expr *ReductionOp) : Ref(Ref), Private(Private), ReductionOp(ReductionOp) {} }; /// List of reduction-based clauses. SmallVector<ReductionData, 4> ClausesData; /// List of addresses of original shared variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses; /// Sizes of the reduction items in chars. SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes; /// Base declarations for the reduction items. SmallVector<const VarDecl *, 4> BaseDecls; /// Emits lvalue for shared expression. LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E); /// Emits upper bound for shared expression (if array section). LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E); /// Performs aggregate initialization. /// \param N Number of reduction item in the common list. /// \param PrivateAddr Address of the corresponding private item. /// \param SharedLVal Address of the original shared variable. /// \param DRD Declare reduction construct used for reduction item. void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, const OMPDeclareReductionDecl *DRD); public: ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> ReductionOps); /// Emits lvalue for a reduction item. /// \param N Number of the reduction item. void emitSharedLValue(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. void emitAggregateType(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. /// \param Size Size of the type in chars. 
void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size); /// Performs initialization of the private copy for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. /// \param DefaultInit Default initialization sequence that should be /// performed if no reduction specific initialization is found. /// \param SharedLVal Address of the original shared variable. void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, llvm::function_ref<bool(CodeGenFunction &)> DefaultInit); /// Returns true if the private copy requires cleanups. bool needCleanups(unsigned N); /// Emits cleanup code for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivatedAddr for using instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. 
bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. Only private copies are captured but we need also to /// store private copy in shared address. /// Also, stores the expression for the private loop counter and it /// threaprivate name. struct LastprivateConditionalData { llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>> DeclToUniqueName; LValue IVLVal; llvm::Function *Fn = nullptr; bool Disabled = false; }; /// Manages list of lastprivate conditional decls for the specified directive. class LastprivateConditionalRAII { enum class ActionToDo { DoNotPush, PushAsLastprivateConditional, DisableLastprivateConditional, }; CodeGenModule &CGM; ActionToDo Action = ActionToDo::DoNotPush; /// Check and try to disable analysis of inner regions for changes in /// lastprivate conditional. 
void tryToDisableInnerAnalysis(const OMPExecutableDirective &S, llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled) const; LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S); public: explicit LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal); static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S); ~LastprivateConditionalRAII(); }; protected: CodeGenModule &CGM; StringRef FirstSeparator, Separator; /// Constructor allowing to redefine the name separator for the variables. explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator); /// Creates offloading entry for the provided entry ID \a ID, /// address \a Addr, size \a Size, and flags \a Flags. virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Helper to emit outlined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Lambda codegen specific to an accelerator device. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emits object of ident_t type with info for source location. /// \param Flags Flags for OpenMP location. /// llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags = 0); /// Returns pointer to ident_t type. 
llvm::Type *getIdentTyPointerTy(); /// Gets thread id value for the current thread. /// llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc); /// Get the function name of an outlined region. // The name can be customized depending on the target. // virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; } /// Emits \p Callee function call with arguments \p Args with location \p Loc. void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits address of the word in a memory where current thread id is /// stored. virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc); void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint = false); void clearLocThreadIdInsertPt(CodeGenFunction &CGF); /// Check if the default location must be constant. /// Default is false to support OMPT/OMPD. virtual bool isDefaultLocationConstant() const { return false; } /// Returns additional flags that can be stored in reserved_2 field of the /// default location. virtual unsigned getDefaultLocationReserved2Flags() const { return 0; } /// Tries to emit declare variant function for \p OldGD from \p NewGD. /// \param OrigAddr LLVM IR value for \p OldGD. /// \param IsForDefinition true, if requested emission for the definition of /// \p OldGD. /// \returns true, was able to emit a definition function for \p OldGD, which /// points to \p NewGD. virtual bool tryEmitDeclareVariant(const GlobalDecl &NewGD, const GlobalDecl &OldGD, llvm::GlobalValue *OrigAddr, bool IsForDefinition); /// Returns default flags for the barriers depending on the directive, for /// which this barier is going to be emitted. static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind); /// Get the LLVM type for the critical name. 
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;} /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// llvm::Value *getCriticalRegionLock(StringRef CriticalName); private: /// Default const ident_t object used for initialization of all other /// ident_t objects. llvm::Constant *DefaultOpenMPPSource = nullptr; using FlagsTy = std::pair<unsigned, unsigned>; /// Map of flags and corresponding default locations. using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>; OpenMPDefaultLocMapTy OpenMPDefaultLocMap; Address getOrCreateDefaultLocation(unsigned Flags); QualType IdentQTy; llvm::StructType *IdentTy = nullptr; /// Map for SourceLocation and OpenMP runtime library debug locations. typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy; OpenMPDebugLocMapTy OpenMPDebugLocMap; /// The type for a microtask which gets passed to __kmpc_fork_call(). /// Original representation is: /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...); llvm::FunctionType *Kmpc_MicroTy = nullptr; /// Stores debug location and ThreadID for the function. struct DebugLocThreadIdTy { llvm::Value *DebugLoc; llvm::Value *ThreadID; /// Insert point for the service instructions. llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr; }; /// Map of local debug location, ThreadId and functions. typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy> OpenMPLocThreadIDMapTy; OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap; /// Map of UDRs and corresponding combiner/initializer. typedef llvm::DenseMap<const OMPDeclareReductionDecl *, std::pair<llvm::Function *, llvm::Function *>> UDRMapTy; UDRMapTy UDRMap; /// Map of functions and locally defined UDRs. 
typedef llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareReductionDecl *, 4>> FunctionUDRMapTy; FunctionUDRMapTy FunctionUDRMap; /// Map from the user-defined mapper declaration to its corresponding /// functions. llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap; /// Map of functions and their local user-defined mappers. using FunctionUDMMapTy = llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareMapperDecl *, 4>>; FunctionUDMMapTy FunctionUDMMap; /// Maps local variables marked as lastprivate conditional to their internal /// types. llvm::DenseMap<llvm::Function *, llvm::DenseMap<CanonicalDeclPtr<const Decl>, std::tuple<QualType, const FieldDecl *, const FieldDecl *, LValue>>> LastprivateConditionalToTypes; /// Type kmp_critical_name, originally defined as typedef kmp_int32 /// kmp_critical_name[8]; llvm::ArrayType *KmpCriticalNameTy; /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator> InternalVars; /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); llvm::Type *KmpRoutineEntryPtrTy = nullptr; QualType KmpRoutineEntryPtrQTy; /// Type typedef struct kmp_task { /// void * shareds; /**< pointer to block of pointers to /// shared vars */ /// kmp_routine_entry_t routine; /**< pointer to routine to call for /// executing task */ /// kmp_int32 part_id; /**< part id for the task */ /// kmp_routine_entry_t destructors; /* pointer to function to invoke /// deconstructors of firstprivate C++ objects */ /// } kmp_task_t; QualType KmpTaskTQTy; /// Saved kmp_task_t for task directive. QualType SavedKmpTaskTQTy; /// Saved kmp_task_t for taskloop-based directive. 
QualType SavedKmpTaskloopTQTy; /// Type typedef struct kmp_depend_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool in:1; /// bool out:1; /// } flags; /// } kmp_depend_info_t; QualType KmpDependInfoTy; /// struct kmp_dim { // loop bounds info casted to kmp_int64 /// kmp_int64 lo; // lower /// kmp_int64 up; // upper /// kmp_int64 st; // stride /// }; QualType KmpDimTy; /// Type struct __tgt_offload_entry{ /// void *addr; // Pointer to the offload entry info. /// // (function or global) /// char *name; // Name of the function or global. /// size_t size; // Size of the entry info (0 if it a function). /// int32_t flags; /// int32_t reserved; /// }; QualType TgtOffloadEntryQTy; /// Entity that registers the offloading constants that were emitted so /// far. class OffloadEntriesInfoManagerTy { CodeGenModule &CGM; /// Number of entries registered so far. unsigned OffloadingEntriesNum = 0; public: /// Base class of the entries info. class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable. OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. 
OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if a there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. OMPTargetRegionEntryDtor = 0x04, }; /// Target region entries info. class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo { /// Address that can be used as the ID of the entry. 
llvm::Constant *ID = nullptr; public: OffloadEntryInfoTargetRegion() : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {} explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags), ID(ID) { setAddress(Addr); } llvm::Constant *getID() const { return ID; } void setID(llvm::Constant *V) { assert(!ID && "ID has been set before!"); ID = V; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoTargetRegion; } }; /// Initialize target region entry. void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, unsigned Order); /// Register target region entry. void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags); /// Return true if a target region entry with the provided information /// exists. bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum) const; /// brief Applies action \a Action on all registered entries. typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy; void actOnTargetRegionEntriesInfo( const OffloadTargetRegionEntryInfoActTy &Action); // // Device global variable entries related. // /// Kind of the global variable entry.. enum OMPTargetGlobalVarEntryKind : uint32_t { /// Mark the entry as a to declare target. OMPTargetGlobalVarEntryTo = 0x0, /// Mark the entry as a to declare target link. OMPTargetGlobalVarEntryLink = 0x1, }; /// Device global variable entries info. class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo { /// Type of the global variable. 
CharUnits VarSize; llvm::GlobalValue::LinkageTypes Linkage; public: OffloadEntryInfoDeviceGlobalVar() : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {} explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {} explicit OffloadEntryInfoDeviceGlobalVar( unsigned Order, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags), VarSize(VarSize), Linkage(Linkage) { setAddress(Addr); } CharUnits getVarSize() const { return VarSize; } void setVarSize(CharUnits Size) { VarSize = Size; } llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; } void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar; } }; /// Initialize device global variable entry. void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order); /// Register device global variable entry. void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Checks if the variable with the given name has been registered already. bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const { return OffloadEntriesDeviceGlobalVar.count(VarName) > 0; } /// Applies action \a Action on all registered entries. typedef llvm::function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy; void actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action); private: // Storage for target region entries kind. The storage is to be indexed by // file ID, device ID, parent function name and line number. 
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion> OffloadEntriesTargetRegionPerLine; typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine> OffloadEntriesTargetRegionPerParentName; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName> OffloadEntriesTargetRegionPerFile; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile> OffloadEntriesTargetRegionPerDevice; typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy; OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion; /// Storage for device global variable entries kind. The storage is to be /// indexed by mangled name. typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar> OffloadEntriesDeviceGlobalVarTy; OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar; }; OffloadEntriesInfoManagerTy OffloadEntriesInfoManager; bool ShouldMarkAsGlobal = true; /// List of the emitted declarations. llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls; /// List of the global variables with their addresses that should not be /// emitted for the target. llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables; /// List of variables that can become declare target implicitly and, thus, /// must be emitted. llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables; /// Mapping of the original functions to their variants and original global /// decl. llvm::MapVector<CanonicalDeclPtr<const FunctionDecl>, std::pair<GlobalDecl, GlobalDecl>> DeferredVariantFunction; using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>; /// Stack for list of declarations in current context marked as nontemporal. /// The set is the union of all current stack elements. llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack; /// Stack for list of addresses of declarations in current context marked as /// lastprivate conditional. The set is the union of all current stack /// elements. 
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;

/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;

/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;

/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the device.
bool HasEmittedDeclareTargetRegion = false;

/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();

/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();

/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);

/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);

/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();

/// Returns specified OpenMP runtime function.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::FunctionCallee createRuntimeFunction(unsigned Function);

/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
                                                 bool IVSigned);

/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
                                                bool IVSigned);

/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);

/// Gets (if variable with the given name already exists) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
                                            const llvm::Twine &Name,
                                            unsigned AddressSpace = 0);

/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;

/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;

/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
                              llvm::Value *Ctor, llvm::Value *CopyCtor,
                              llvm::Value *Dtor, SourceLocation Loc);

/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
                                llvm::Value *Handle, llvm::Value *BasePtr,
                                llvm::Value *Ptr, llvm::Value *Size,
                                llvm::Value *MapType, CharUnits ElementSize,
                                llvm::BasicBlock *ExitBB, bool IsInit);

/// Bundle of values produced by emitTaskInit: the allocated task object, its
/// entry/duplication functions and the typed view of the task data record.
struct TaskResultTy {
  llvm::Value *NewTask = nullptr;
  llvm::Function *TaskEntry = nullptr;
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  LValue TDBase;
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  llvm::Value *TaskDupFn = nullptr;
};

/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const OMPTaskDataTy &Data);

/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; } /// Emit code that pushes the trip count of loops associated with constructs /// 'target teams distribute' and 'teams distribute parallel for'. /// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data. void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc); public: explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {} virtual ~CGOpenMPRuntime() {} virtual void clear(); /// Emits code for OpenMP 'if' clause using specified \a CodeGen /// function. Here is the logic: /// if (Cond) { /// ThenGen(); /// } else { /// ElseGen(); /// } void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen); /// Checks if the \p Body is the \a CompoundStmt and returns its child /// statement iff there is only one that is not evaluatable at the compile /// time. static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body); /// Get the platform-specific name separator. std::string getName(ArrayRef<StringRef> Parts) const; /// Emit code for the specified user defined reduction construct. virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D); /// Get combiner/initializer for the specified user-defined reduction, if any. virtual std::pair<llvm::Function *, llvm::Function *> getUserDefinedReduction(const OMPDeclareReductionDecl *D); /// Emit the function for the user defined mapper construct. 
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF = nullptr); /// Emits outlined function for the specified OpenMP parallel directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the specified OpenMP teams directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. 
/// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts); /// Cleans up references to the objects in finished function. /// virtual void functionFinished(CodeGenFunction &CGF); /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr); /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. 
virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc); /// Emits code for a taskyield directive. virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc); /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc); /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps); /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads); /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false); /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. 
/// virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of distribute directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static chunked. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. /// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. 
/// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. bool Ordered = false; /// Address of the output variable in which the flag of the last iteration /// is returned. Address IL = Address::invalid(); /// Address of the output variable in which the lower iteration number is /// returned. Address LB = Address::invalid(); /// Address of the output variable in which the upper iteration number is /// returned. Address UB = Address::invalid(); /// Address of the output variable in which the stride value is returned /// necessary to generated the static_chunked scheduled loop. Address ST = Address::invalid(); /// Value of the chunk for the static_chunked scheduled loop. For the /// default (nullptr) value, the chunk 1 will be used. 
llvm::Value *Chunk = nullptr;
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
              Address LB, Address UB, Address ST,
              llvm::Value *Chunk = nullptr)
    : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
      UB(UB), ST(ST), Chunk(Chunk) {}
};

/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                               OpenMPDirectiveKind DKind,
                               const OpenMPScheduleTy &ScheduleKind,
                               const StaticRTInput &Values);

/// Call the appropriate runtime routine to initialize it before start of the
/// distribute loop (statically scheduled 'distribute' directives).
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
                                      SourceLocation Loc,
                                      OpenMPDistScheduleClauseKind SchedKind,
                                      const StaticRTInput &Values);

/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned); /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind); /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST); /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc); /// Returns address of the threadprivate variable for the current /// thread. 
/// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr); /// Emit a code for initialization of declare target variable. /// \param VD Declare target variable. /// \param Addr Address of the global variable \a VD. /// \param PerformInit true if initialization expression is not constant. virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit); /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name); /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. 
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
                       SourceLocation Loc);

/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const Expr *IfCond,
                          const OMPTaskDataTy &Data);

/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1.
/// Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPLoopDirective &D,
                              llvm::Function *TaskFunction,
                              QualType SharedsTy, Address Shareds,
                              const Expr *IfCond, const OMPTaskDataTy &Data);

/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false /// otherwise. virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel = false); /// Emits reduction function. /// \param ArgsType Array type containing pointers to reduction variables. /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. llvm::Function *emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps); /// Emits single reduction combiner void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS); struct ReductionOptionsTy { bool WithNowait; bool SimpleReduction; OpenMPDirectiveKind ReductionKind; }; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... 
/// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options); /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _task_red_item_t red_data[n]; /// ... /// red_data[i].shar = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data); /// \endcode /// /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data); /// Required to resolve existing problems in the runtime. 
/// Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                                     ReductionCodeGen &RCG, unsigned N);

/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                                     llvm::Value *ReductionsPtr,
                                     LValue SharedLVal);

/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);

/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       OpenMPDirectiveKind CancelRegion);

/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                            const Expr *IfCond,
                            OpenMPDirectiveKind CancelRegion);

/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param SizeEmitter Callback to emit number of iterations for loop-based /// directives. virtual void emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, const Expr *Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. virtual bool emitTargetFunctions(GlobalDecl GD); /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. 
virtual bool emitTargetGlobalVariable(GlobalDecl GD);

/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
                                          llvm::Constant *Addr);

/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
                                               const VarDecl *VD);

/// Emit the global \a GD if it is meaningful for the target. Returns true
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);

/// Creates and returns a registration function for when at least one
/// requires directive was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();

/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();

/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
                           const OMPExecutableDirective &D,
                           SourceLocation Loc, llvm::Function *OutlinedFn,
                           ArrayRef<llvm::Value *> CapturedVars);

/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
                                const Expr *ThreadLimit, SourceLocation Loc);

/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
  /// Set to true if device pointer information has to be obtained.
  bool RequiresDevicePointerInfo = false;

public:
  /// The array of base pointer passed to the runtime library.
  llvm::Value *BasePointersArray = nullptr;
  /// The array of section pointers passed to the runtime library.
  llvm::Value *PointersArray = nullptr;
  /// The array of sizes passed to the runtime library.
  llvm::Value *SizesArray = nullptr;
  /// The array of map types passed to the runtime library.
  llvm::Value *MapTypesArray = nullptr;
  /// The total number of pointers passed to the runtime library.
  unsigned NumberOfPtrs = 0u;
  /// Map between a declaration of a capture and the corresponding base
  /// pointer address where the runtime returns the device pointers.
  llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

  explicit TargetDataInfo() {}
  explicit TargetDataInfo(bool RequiresDevicePointerInfo)
      : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
  /// Clear information about the data arrays.
  void clearArrayInfo() {
    BasePointersArray = nullptr;
    PointersArray = nullptr;
    SizesArray = nullptr;
    MapTypesArray = nullptr;
    NumberOfPtrs = 0u;
  }
  /// Return true if the current target data information has valid arrays.
  bool isValid() {
    return BasePointersArray && PointersArray && SizesArray &&
           MapTypesArray && NumberOfPtrs;
  }
  bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
};

/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info); /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device); /// Marks function \a Fn with properly mangled versions of vector functions. /// \param FD Function marked as 'declare simd'. /// \param Fn LLVM function that must be marked with 'declare simd' /// attributes. virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn); /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations); /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C); /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. 
virtual const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { return NativeParam; } /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const; /// Choose default schedule type and chunk value for the /// dist_schedule clause. virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {} /// Choose default schedule type and chunk value for the /// schedule clause. virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const; /// Emits call of the outlined function with the provided arguments, /// translating these arguments to correct target-specific arguments. virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits OpenMP-specific function prolog. /// Required for device constructs. virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D); /// Gets the OpenMP-specific address of the local variable. virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD); /// Marks the declaration as already emitted for the device code and returns /// true, if it was marked already, and false, otherwise. bool markAsGlobalTarget(GlobalDecl GD); /// Emit deferred declare target variables marked for deferred emission. void emitDeferredTargetDecls() const; /// Adjust some parameters for the target-based directives, like addresses of /// the variables captured by reference in lambdas. 
virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const; /// Perform check on requires decl to ensure that target architecture /// supports unified addressing virtual void checkArchForUnifiedAddressing(const OMPRequiresDecl *D); /// Checks if the variable has associated OMPAllocateDeclAttr attribute with /// the predefined allocator and translates it into the corresponding address /// space. virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS); /// Return whether the unified_shared_memory has been specified. bool hasRequiresUnifiedSharedMemory() const; /// Emits the definition of the declare variant function. virtual bool emitDeclareVariant(GlobalDecl GD, bool IsForDefinition); /// Checks if the \p VD variable is marked as nontemporal declaration in /// current context. bool isNontemporalDecl(const ValueDecl *VD) const; /// Create specialized alloca to handle lastprivate conditionals. Address emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD); /// Checks if the provided \p LVal is lastprivate conditional and emits the /// code to update the value of the original variable. /// \code /// lastprivate(conditional: a) /// ... /// <type> a; /// lp_a = ...; /// #pragma omp critical(a) /// if (last_iv_a <= iv) { /// last_iv_a = iv; /// global_a = lp_a; /// } /// \endcode virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS); /// Checks if the lastprivate conditional was updated in inner region and /// writes the value. /// \code /// lastprivate(conditional: a) /// ... /// <type> a;bool Fired = false; /// #pragma omp ... 
/// shared(a)
/// {
///   lp_a = ...;
///   Fired = true;
/// }
/// if (Fired) {
///   #pragma omp critical(a)
///   if (last_iv_a <= iv) {
///     last_iv_a = iv;
///     global_a = lp_a;
///   }
///   Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);

/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
                                                   LValue PrivLVal,
                                                   const VarDecl *VD,
                                                   SourceLocation Loc);
};

/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
  explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
  ~CGOpenMPSIMDRuntime() override {}

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives
  /// it is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
                               const VarDecl *ThreadIDVar,
                               OpenMPDirectiveKind InnermostKind,
                               const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override; /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) override; /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. 
/// void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) override; /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr) override; /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override; /// Emits code for a taskyield directive. void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override; /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) override; /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override; /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. 
/// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false) override; /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. 
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override; /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override; /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override; /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override; /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. 
/// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override; /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override; /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override; /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override; /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc) override; /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. 
/// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. 
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override; /// Emit a code for initialization of task reduction clause. 
Next code
  /// should be emitted for reduction:
  /// \code
  ///
  /// _task_red_item_t red_data[n];
  /// ...
  /// red_data[i].shar = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
  /// \endcode
  ///
  /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
  /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates, reductions etc.
  llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
                                     ArrayRef<const Expr *> LHSExprs,
                                     ArrayRef<const Expr *> RHSExprs,
                                     const OMPTaskDataTy &Data) override;

  /// Required to resolve existing problems in the runtime. Emits threadprivate
  /// variables to store the size of the VLAs/array sections for
  /// initializer/combiner/finalizer functions + emits threadprivate variable to
  /// store the pointer to the original reduction item for the custom
  /// initializer defined by declare reduction construct.
  /// \param RCG Allows to reuse an existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                               ReductionCodeGen &RCG, unsigned N) override;

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                               llvm::Value *ReductionsPtr,
                               LValue SharedLVal) override;

  /// Emit code for 'taskwait' directive.
  void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must be
  /// emitted.
  ///
  void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
                                 OpenMPDirectiveKind CancelRegion) override;

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  /// \param CancelRegion Region kind for which the cancel must be emitted.
  ///
  void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                      const Expr *IfCond,
                      OpenMPDirectiveKind CancelRegion) override;

  /// Emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
  /// \param IsOffloadEntry True if the outlined function is an offload entry.
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                  StringRef ParentName,
                                  llvm::Function *&OutlinedFn,
                                  llvm::Constant *&OutlinedFnID,
                                  bool IsOffloadEntry,
                                  const RegionCodeGenTy &CodeGen) override;

  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts offloading the execution to the device, in the event of
  /// a failure it executes the host version outlined in \a OutlinedFn.
  /// \param D Directive to emit.
  /// \param OutlinedFn Host version of the code to be offloaded.
  /// \param OutlinedFnID ID of host version of the code to be offloaded.
  /// \param IfCond Expression evaluated in if clause associated with the target
  /// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, const Expr *Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. 
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. 
/// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) override { return Address::invalid(); } }; } // namespace CodeGen } // namespace clang #endif
lchol_csc_inspector.h
//
//
#include <cstdio>
#include <vector>
#include <assert.h>
#include <set>

#undef MIN
#define MIN(x,y) ((x) < (y) ? (x) : (y))
#undef MAX
#define MAX(x,y) ((x) > (y) ? (x) : (y))

/*
 * Computes the DAG of dependency after simplification.
 *
 * For every column t1 in 1..n-1, every pruned parent column p listed in
 * pruneSet[prunePtr[t1] .. prunePtr[t1+1]-1] with 0 <= p < t1 yields an edge
 * p -> t1, recorded as DAG[p].push_back(t1).
 *
 * prunePtr  CSC-style offsets into pruneSet, length >= n+1.
 * pruneSet  parent-column indices per column.
 * DAG       out-parameter; DAG[p] receives the dependent columns of p.
 *           Must already have at least n entries.
 *
 * NOTE: this loop is intentionally serial.  The original code ran it under
 * `#pragma omp parallel for`, but distinct rows t1 can prune to the *same*
 * parent p, so concurrent DAG[p].push_back(...) from different threads is a
 * data race (std::vector is not thread-safe for concurrent modification).
 * Serial execution also keeps each adjacency list deterministically sorted
 * by t1.  The macro obfuscation (pruneSet()/prunePtr()/s0) from the code
 * generator has been removed; the macros previously leaked into the rest of
 * the translation unit because they were never #undef'd.
 */
void lchol_csc_inspector(int n, int *prunePtr, int *pruneSet,
                         std::vector<std::vector<int>> &DAG) {
  for (int t1 = 1; t1 <= n - 1; t1++) {
    for (int t2 = prunePtr[t1]; t2 <= prunePtr[t1 + 1] - 1; t2++) {
      int parent = pruneSet[t2];
      // keep only valid parents strictly earlier than t1
      if (parent >= 0 && t1 >= parent + 1)
        DAG[parent].push_back(t1);
    }
  }
}

/*
 * Computes the DAG of dependency after simplification
void lch_csc_inspector(int n, int *prunePtr, int *pruneSet,
                       std::vector<std::set<int>>& DAG){
  for(int t1 = 1; t1 <= n-1; t1++) {
    for(int t2 = prunePtr[t1]; t2 <= prunePtr[t1+1]-1; t2++) {
      if (pruneSet[t2] >= 0 && t1 >= pruneSet[t2]+1) {
        int t3=pruneSet[t2];
        DAG[t3].insert( t1 );
      }
    }
  }
}
*/
clike.c
/*** Likelihood implementation in C -------------------------------- Copyright (c) 2017 Johannes Buchner Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***/ #include<stdbool.h> #include<stdio.h> #include<stdlib.h> #include<assert.h> #include<math.h> #ifdef PARALLEL #include<omp.h> #endif #define IFVERBOSE if(0) #define IFDEBUG if(0) #define adouble double #define bdouble double #define sqr(x) (pow(x,2)) // Parallelisation does not work at the moment, you are welcome to fix it int like( const void * xp, const void * yyp, const int ndata, const int nx, const double A, const double mu, const double sig, const double noise_level, const void * data_maskp, void * Loutp ) { const adouble * x = (const adouble*) xp; const adouble * yy = (const adouble*) yyp; const bool * data_mask = (const bool*) data_maskp; adouble * Lout = (adouble*) Loutp; { #ifdef PARALLEL int k = 0; #pragma omp parallel for // this is stupid because it does not actually safe model evaluations, // but at least it should run faster for our testing purposes. for (int i = 0; i < ndata; i++) { if (data_mask[i]) { Lout[k] = 0; for (int j = 0; j < nx; j++) { const double ypred = A * exp(-0.5 * sqr((mu - x[j])/sig)); IFVERBOSE printf("y %d %d: %f %f\n", i, j, yy[i + j*ndata], ypred); Lout[k] += sqr((ypred - yy[i + j*ndata]) / noise_level); } k++; } } #else for (int j = 0; j < nx; j++) { const double ypred = A * exp(-0.5 * sqr((mu - x[j])/sig)); int k = 0; for (int i = 0; i < ndata; i++) { IFVERBOSE printf("data_mask %d: %d\n", i, data_mask[i]); if (data_mask[i]) { IFVERBOSE printf("y %d %d: %f %f\n", i, j, yy[i + j*ndata], ypred); Lout[k] += sqr((ypred - yy[i + j*ndata]) / noise_level); k++; } } } #endif } IFVERBOSE { int k = 0; for (int i = 0; i < ndata; i++) { if (data_mask[i]) { printf("L %d: %f\n", k, Lout[k]); k++; } } } return 0; }
GB_binop__hypot_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated — any change must be made in the Generator/
// sources or it will be lost on the next regeneration.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__hypot_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__hypot_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__hypot_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__hypot_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__hypot_fp32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__hypot_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__hypot_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__hypot_fp32)
// C=scalar+B                       GB (_bind1st__hypot_fp32)
// C=scalar+B'                      GB (_bind1st_tran__hypot_fp32)
// C=A+scalar                       GB (_bind2nd__hypot_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__hypot_fp32)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = hypotf (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = hypotf (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_HYPOT || GxB_NO_FP32 || GxB_NO_HYPOT_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__hypot_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__hypot_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__hypot_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns);
    // kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__hypot_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__hypot_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__hypot_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (hypotf is commutative, so GB_BINOP_FLIP is 0 and this path is used.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__hypot_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__hypot_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__hypot_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = hypotf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__hypot_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = hypotf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = hypotf (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__hypot_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same definition for this operator, where A, B,
    // and C all have type float; kept as generated)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = hypotf (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__hypot_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ccl_haloprofile.c
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_roots.h> #include <gsl/gsl_integration.h> #include <gsl/gsl_sf_expint.h> #include "ccl.h" static double einasto_norm_integrand(double x, void *params) { double alpha = *((double *)(params)); return x*x*exp(-2*(pow(x,alpha)-1)/alpha); } void ccl_einasto_norm_integral(int n_m, double *r_s, double *r_delta, double *alpha, double *norm_out,int *status) { #pragma omp parallel default(none) \ shared(n_m, r_s, r_delta, alpha, norm_out, status) { int ii; int status_this=0; gsl_function F; gsl_integration_workspace *w = gsl_integration_workspace_alloc(1000); if (w == NULL) status_this = CCL_ERROR_MEMORY; if(status_this == 0) { #pragma omp for for(ii=0;ii<n_m;ii++) { int qagstatus; double result, eresult; double x_max = r_delta[ii]/r_s[ii]; F.function = &einasto_norm_integrand; F.params = &(alpha[ii]); qagstatus = gsl_integration_qag(&F, 0, x_max, 0, 1E-4, 1000, GSL_INTEG_GAUSS31, w, &result, &eresult); if(qagstatus != GSL_SUCCESS) { ccl_raise_gsl_warning(qagstatus, "ccl_haloprofile.c: ccl_einasto_norm_integral():"); status_this = CCL_ERROR_INTEG; result = NAN; } norm_out[ii] = 4 * M_PI * r_s[ii] * r_s[ii] * r_s[ii] * result; } } //end omp for gsl_integration_workspace_free(w); if(status_this) { #pragma omp atomic write *status = status_this; } } //end omp parallel } static double hernquist_norm_integrand(double x, void *params) { double opx=1+x; return x*x/(x*opx*opx*opx); } void ccl_hernquist_norm_integral(int n_m, double *r_s, double *r_delta, double *norm_out,int *status) { #pragma omp parallel default(none) \ shared(n_m, r_s, r_delta, norm_out, status) { int ii; int status_this=0; gsl_function F; gsl_integration_workspace *w = gsl_integration_workspace_alloc(1000); if (w == NULL) status_this = CCL_ERROR_MEMORY; if(status_this == 0) { #pragma omp for for(ii=0;ii<n_m;ii++) { int qagstatus; double result, eresult; double x_max = r_delta[ii]/r_s[ii]; F.function = 
&hernquist_norm_integrand; F.params = NULL; qagstatus = gsl_integration_qag(&F, 0, x_max, 0, 1E-4, 1000, GSL_INTEG_GAUSS31, w, &result, &eresult); if(qagstatus != GSL_SUCCESS) { ccl_raise_gsl_warning(qagstatus, "ccl_haloprofile.c: ccl_hernquist_norm_integral():"); status_this = CCL_ERROR_INTEG; result = NAN; } norm_out[ii] = 4 * M_PI * r_s[ii] * r_s[ii] * r_s[ii] * result; } } //end omp for gsl_integration_workspace_free(w); if(status_this) { #pragma omp atomic write *status = status_this; } } //end omp parallel }
transition_matrix.h
/* * Created on: Mar 22, 2016 * Author: Steffen Rechner <steffen.rechner@informatik.uni-halle.de> * * This file is part of the marathon software. * * Copyright (c) 2016, Steffen Rechner * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef INCLUDE_MARATHON_TRANSITIONMATRIX_H_ #define INCLUDE_MARATHON_TRANSITIONMATRIX_H_ #include "state_graph.h" #ifdef USE_ARMADILLO #include <armadillo> #endif #ifdef USE_BLAS #include <cblas.h> #endif namespace marathon { /** * Virtual Base Class for Transition Matrix. */ template<class T=double> class TransitionMatrix { protected: size_t N; // number of rows and columns size_t ld; // lead dimension (upper bound on n) std::vector<T> data; // actual data array public: /** * Standard Constructor. Create uninitialized transition matrix of size N times N. 
* @param N number of rows or columns */ TransitionMatrix(const size_t N) : N(N), ld(((N + 255) / 256) * 256) // lead dimension is next mulitple of 256 { data.resize(N * ld, 0); } /** * Constructor. Create Transition Matrix from State Graph. * @param sg Pointer to state graph object. */ TransitionMatrix(const StateGraph &sg) : TransitionMatrix(sg.getNumStates()) { for (const Transition *t : sg.getArcs()) { this->data[t->from * ld + t->to] = t->weight.convert_to<T>(); } } /** * Return size of the matrix. */ size_t getDimension() const { return N; } /** * Return lead dimension of the matrix. */ size_t getLeadDimension() const { return ld; } /** * Return a pointer to the data. */ const std::vector<T> &getData() const { return data; } /** * Return P[i,j]. * @param i row index * @param j column index * @return P[i,j] */ T get(size_t i, size_t j) const { return data[i * ld + j]; } /** * Set P[i,j] to x. * @param i row index. * @param j column index. * @param x value of type T */ void set(size_t i, size_t j, T x) { data[i * ld + j] = x; } /** * Overwrite the current matrix with zeroes. */ virtual void clear() { data.resize(N * ld, T(0)); } /** * Compute P^k. * @param P A pointer to a Transition Matrix. * @param k Exponent. * @return P^k */ TransitionMatrix<T> pow(uint k) const { // init matrix if (k == 0) { return eye(N); } // create binary representation of k int bin[32]; memset(bin, 0, 32 * sizeof(int)); int l = 31; int kk = k; while (kk > 0) { bin[l] = kk % 2; kk >>= 1; l--; } l += 2; #ifdef DEBUG std::cout << "bin: "; for (int i = 0; i < 32; i++) { std::cout << bin[i]; } std::cout << " l=" << l << std::endl; #endif TransitionMatrix<T> A(*this); // will be returned // binary exponentation - Left to Right (see Don. Knuth: Seminumerical Alg. Vol. 2 page 461) while (l < 32) { // square A = A * A; // multiply if (bin[l] == 1) A = A * *this; l++; } return A; } /** * Matrix multiplication. * @param P Transition matrix. 
* @return P * this */ TransitionMatrix<T> operator*(const TransitionMatrix<T> &P) const { TransitionMatrix<T> X(N); // will be returned #pragma omp parallel for for (size_t i = 0; i < N; i++) { for (size_t j = 0; j < N; j++) { T p_ij = 0; for (size_t k = 0; k < N; k++) { p_ij += this->get(i, k) * P.get(k, j); } X.set(i, j, p_ij); } } return X; } /** * Return a string that represents the matrix. */ virtual std::string to_string() const { std::stringstream ss; ss << "\n"; for (size_t i = 0; i < this->N; i++) { ss << " "; for (size_t j = 0; j < this->N - 1; j++) { ss << std::setprecision(std::numeric_limits<T>::digits10) << std::fixed << this->data[i * this->ld + j] << " "; } ss << std::setprecision(std::numeric_limits<T>::digits10) << std::fixed << this->data[i * this->ld + this->N - 1]; ss << "\n"; } return ss.str(); } /** * To output into streams. */ friend inline std::ostream &operator<<(std::ostream &out, const TransitionMatrix<T> &s) { out << s.to_string(); return out; } /** * Return the identity matrix with N rows and columns. * @param N Number of rows and columns. * @return Identity matrix. 
*/ static TransitionMatrix<T> eye(size_t N) { TransitionMatrix<T> P(N); for (size_t i = 0; i < N; i++) P.set(i,i,1); return P; } }; /*********************************************************************** * template specializations **********************************************************************/ #ifdef USE_BLAS template<> TransitionMatrix<float> TransitionMatrix<float>::operator*(const TransitionMatrix<float> &P) const { const float alpha = 1.0; const float beta = 0.0; TransitionMatrix<float> X(N); // use cblas cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha, &P.data[0], P.ld, &data[0], ld, beta, &X.data[0], X.ld); return X; } template<> TransitionMatrix<double> TransitionMatrix<double>::operator*(const TransitionMatrix<double> &P) const { const double alpha = 1.0; const double beta = 0.0; TransitionMatrix<double> X(N); // use cblas cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha, &P.data[0], P.ld, &data[0], ld, beta, &X.data[0], X.ld); return X; } #endif } #endif /* INCLUDE_MARATHON_TRANSITIONMATRIX_H_ */
LAGraph_BF_full1a.c
//------------------------------------------------------------------------------ // LAGraph_BF_full1a.c: Bellman-Ford single-source shortest paths, returns tree, // while diagonal of input matrix A needs not to be explicit 0 //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause // // See additional acknowledgments in the LICENSE file, // or contact permission@sei.cmu.edu for the full terms. //------------------------------------------------------------------------------ // LAGraph_BF_full1a: Bellman-Ford single source shortest paths, returning both // the path lengths and the shortest-path tree. contributed by Jinhao Chen and // Tim Davis, Texas A&M. // LAGraph_BF_full performs a Bellman-Ford to find out shortest path, parent // nodes along the path and the hops (number of edges) in the path from given // source vertex s in the range of [0, n) on graph given as matrix A with size // n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i // to vertex j with weight w, then A(i, j) = w. // LAGraph_BF_full1a returns GrB_SUCCESS if it succeeds. In this case, there // are no negative-weight cycles in the graph, and d, pi, and h are returned. // The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1, // where p is the parent node of k-th node in the shortest path. In particular, // pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in the shortest // path. // If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and the // GrB_Vectors d(k), pi(k) and h(k) (i.e., *pd_output, *ppi_output and // *ph_output respectively) will be NULL when negative-weight cycle detected. // Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and // so on, can be returned, if these errors are found by the underlying // GrB_* functions. 
//------------------------------------------------------------------------------

// Free all workspace objects created by LAGraph_BF_full1a.  Safe to invoke at
// any point: GrB_free and LAGraph_Free both accept NULL/already-freed handles.
#define LAGraph_FREE_WORK                  \
{                                          \
    GrB_free(&d);                          \
    GrB_free(&dmasked);                    \
    GrB_free(&dless);                      \
    GrB_free(&Atmp);                       \
    GrB_free(&BF_Tuple3);                  \
    GrB_free(&BF_lMIN_Tuple3);             \
    GrB_free(&BF_PLUSrhs_Tuple3);          \
    GrB_free(&BF_LT_Tuple3);               \
    GrB_free(&BF_lMIN_Tuple3_Monoid);      \
    GrB_free(&BF_lMIN_PLUSrhs_Tuple3);     \
    LAGraph_Free ((void**)&I);             \
    LAGraph_Free ((void**)&J);             \
    LAGraph_Free ((void**)&w);             \
    LAGraph_Free ((void**)&W);             \
    LAGraph_Free ((void**)&h);             \
    LAGraph_Free ((void**)&pi);            \
}

// Free workspace plus the three output vectors (used on error paths only).
#define LAGraph_FREE_ALL                   \
{                                          \
    LAGraph_FREE_WORK                      \
    GrB_free (pd_output);                  \
    GrB_free (ppi_output);                 \
    GrB_free (ph_output);                  \
}

#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h>  // from src/utility

// Signature of a GraphBLAS user-defined binary operator: z = f(x,y),
// where all three arguments are passed as untyped pointers.
typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;

//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
    double w;    // w corresponds to a path weight.
    GrB_Index h; // h corresponds to a path size or number of hops.
    GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
                 // vertex indexed as 1, 2, 3, ..., V, and pi = 0 (as nil)
                 // for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;

//------------------------------------------------------------------------------
// binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------

// z = min(x,y) under the lexicographic order on (w, h, pi): smaller weight
// wins, ties broken by fewer hops, then by smaller parent index.  The pi
// tie-break makes the reduction deterministic.  z may alias x or y
// (GraphBLAS permits this for user-defined operators).
void BF_lMIN3
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    if (x->w < y->w
        || (x->w == y->w && x->h < y->h)
        || (x->w == y->w && x->h == y->h && x->pi < y->pi))
    {
        if (z != x) { *z = *x; }    // skip the copy when z aliases x
    }
    else
    {
        *z = *y;
    }
}

// Path extension: add weights and hop counts.  The parent of the extended
// path is taken from y (the edge end) unless x is "infinity" (no path yet)
// or y has no parent recorded, in which case x's parent is kept.
void BF_PLUSrhs3
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    z->w = x->w + y->w ;
    z->h = x->h + y->h ;
    z->pi = (x->pi != UINT64_MAX && y->pi != 0) ? y->pi : x->pi ;
}

// z = (x < y) under the same lexicographic order as BF_lMIN3; used to
// detect entries whose tentative distance just improved.
void BF_LT3
(
    bool *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    (*z) = (x->w < y->w
        || (x->w == y->w && x->h < y->h)
        || (x->w == y->w && x->h == y->h && x->pi < y->pi)) ;
}

// Given a n-by-n adjacency matrix A and a source vertex s.
// If there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and parents along the paths as vector d. Otherwise,
// returns d=NULL if there is a negative-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the // sum of edges length in the shortest path // ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the // parent of i-th vertex in the shortest path // ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the // number of edges from s to i in the shortest path // A has weights on corresponding entries of edges // s is given index for source vertex GrB_Info LAGraph_BF_full1a ( GrB_Vector *pd_output, //the pointer to the vector of distance GrB_Vector *ppi_output, //the pointer to the vector of parent GrB_Vector *ph_output, //the pointer to the vector of hops const GrB_Matrix A, //matrix for the graph const GrB_Index s //given index of the source ) { GrB_Info info; char *msg = NULL ; // tmp vector to store distance vector after n (i.e., V) loops GrB_Vector d = NULL, dmasked = NULL, dless = NULL; GrB_Matrix Atmp = NULL; GrB_Type BF_Tuple3; GrB_BinaryOp BF_lMIN_Tuple3; GrB_BinaryOp BF_PLUSrhs_Tuple3; GrB_BinaryOp BF_LT_Tuple3; GrB_Monoid BF_lMIN_Tuple3_Monoid; GrB_Semiring BF_lMIN_PLUSrhs_Tuple3; GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A GrB_Index *h = NULL, *pi = NULL; double *w = NULL; BF_Tuple3_struct *W = NULL; if (pd_output != NULL) *pd_output = NULL; if (ppi_output != NULL) *ppi_output = NULL; if (ph_output != NULL) *ph_output = NULL; LG_CHECK (A == NULL || pd_output == NULL || ppi_output == NULL || ph_output == NULL, -1001, "inputs are NULL") ; GrB_TRY (GrB_Matrix_nrows (&nrows, A)) ; GrB_TRY (GrB_Matrix_ncols (&ncols, A)) ; GrB_TRY (GrB_Matrix_nvals (&nz, A)); LG_CHECK (nrows != ncols, -1002, "A must be square") ; n = nrows; LG_CHECK (s >= n || s < 0, -1003, "invalid source node") ; //-------------------------------------------------------------------------- // create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring 
//-------------------------------------------------------------------------- // GrB_Type GrB_TRY (GrB_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct))); // GrB_BinaryOp GrB_TRY (GrB_BinaryOp_new(&BF_LT_Tuple3, (LAGraph_binary_function) (&BF_LT3), GrB_BOOL, BF_Tuple3, BF_Tuple3)); GrB_TRY (GrB_BinaryOp_new(&BF_lMIN_Tuple3, (LAGraph_binary_function) (&BF_lMIN3), BF_Tuple3, BF_Tuple3,BF_Tuple3)); GrB_TRY (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3, (LAGraph_binary_function)(&BF_PLUSrhs3), BF_Tuple3, BF_Tuple3, BF_Tuple3)); // GrB_Monoid BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY, .h = UINT64_MAX, .pi = UINT64_MAX }; LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3, &BF_identity)); //GrB_Semiring GrB_TRY (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3, BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3)); //-------------------------------------------------------------------------- // allocate arrays used for tuplets //-------------------------------------------------------------------------- #if 1 I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ; J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ; w = LAGraph_Malloc (nz, sizeof(double)) ; W = LAGraph_Malloc (nz, sizeof(BF_Tuple3_struct)) ; LG_CHECK (I == NULL || J == NULL || w == NULL || W == NULL, -1004, "out of memory") ; //-------------------------------------------------------------------------- // create matrix Atmp based on A, while its entries become BF_Tuple3 type //-------------------------------------------------------------------------- LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A)); int nthreads; LAGRAPH_OK (LAGraph_GetNumThreads (&nthreads, NULL)) ; printf ("nthreads %d\n", nthreads) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (GrB_Index k = 0; k < nz; k++) { W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 }; } GrB_TRY (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n)); LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3)); LAGraph_Free 
((void**)&I); LAGraph_Free ((void**)&J); LAGraph_Free ((void**)&W); LAGraph_Free ((void**)&w); #else TODO: GraphBLAS could use a new kind of unary operator, not z=f(x), but [z,flag] = f (aij, i, j, k, nrows, ncols, nvals, etc, ...) flag: keep or discard. Combines GrB_apply and GxB_select. builtins: f(...) = i, bool is true j, bool is true i+j*nrows, etc. k tril, triu (like GxB_select): return aij, and true/false boolean z=f(x,i). x: double, z:tuple3, i:GrB_Index with the row index of x // z = (BF_Tuple3_struct) { .w = x, .h = 1, .pi = i + 1 }; GrB_apply (Atmp, op, A, ...) in the BFS, this is used: op: z = f ( .... ) = i to replace x(i) with i #endif //-------------------------------------------------------------------------- // create and initialize "distance" vector d, dmasked and dless //-------------------------------------------------------------------------- GrB_TRY (GrB_Vector_new(&d, BF_Tuple3, n)); // make d dense LAGRAPH_OK(GrB_Vector_assign_UDT(d, NULL, NULL, (void*)&BF_identity, GrB_ALL, n, NULL)); // initial distance from s to itself BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 }; LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s)); // creat dmasked as a sparse vector with only one entry at s GrB_TRY (GrB_Vector_new(&dmasked, BF_Tuple3, n)); LAGRAPH_OK(GrB_Vector_setElement_UDT(dmasked, &d0, s)); // create dless GrB_TRY (GrB_Vector_new(&dless, GrB_BOOL, n)); //-------------------------------------------------------------------------- // start the Bellman Ford process //-------------------------------------------------------------------------- bool any_dless= true; // if there is any newly found shortest path int64_t iter = 0; // number of iterations // terminate when no new path is found or more than V-1 loops while (any_dless && iter < n - 1) { // execute semiring on dmasked and A, and save the result to dmasked GrB_TRY (GrB_vxm(dmasked, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL)); // dless = d .< dtmp 
GrB_TRY (GrB_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d, NULL)); // if there is no entry with smaller distance then all shortest paths // are found GrB_TRY (GrB_reduce (&any_dless, NULL, GrB_LOR_MONOID_BOOL, dless, NULL)) ; if(any_dless) { // update all entries with smaller distances //GrB_TRY (GrB_apply(d, dless, NULL, BF_Identity_Tuple3, // dmasked, NULL)); GrB_TRY (GrB_assign(d, dless, NULL, dmasked, GrB_ALL, n, NULL)); // only use entries that were just updated //GrB_TRY (GrB_Vector_clear(dmasked)); //GrB_TRY (GrB_apply(dmasked, dless, NULL, BF_Identity_Tuple3, // d, NULL)); //try: GrB_TRY (GrB_assign(dmasked, dless, NULL, d, GrB_ALL, n, GrB_DESC_R)); } iter ++; } // check for negative-weight cycle only when there was a new path in the // last loop, otherwise, there can't be a negative-weight cycle. if (any_dless) { // execute semiring again to check for negative-weight cycle GrB_TRY (GrB_vxm(dmasked, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL)); // dless = d .< dtmp GrB_TRY (GrB_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d, NULL)); // if there is no entry with smaller distance then all shortest paths // are found GrB_TRY (GrB_reduce (&any_dless, NULL, GrB_LOR_MONOID_BOOL, dless, NULL)) ; if(any_dless) { // printf("A negative-weight cycle found. \n"); LAGraph_FREE_ALL; return (GrB_NO_VALUE) ; } } //-------------------------------------------------------------------------- // extract tuple from "distance" vector d and create GrB_Vectors for output //-------------------------------------------------------------------------- I = LAGraph_Malloc (n, sizeof(GrB_Index)) ; W = LAGraph_Malloc (n, sizeof(BF_Tuple3_struct)) ; w = LAGraph_Malloc (n, sizeof(double)) ; h = LAGraph_Malloc (n, sizeof(GrB_Index)) ; pi = LAGraph_Malloc (n, sizeof(GrB_Index)) ; LG_CHECK (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL, -1004, "out of memory") ; // TODO: create 3 unary ops, and use GrB_apply? 
LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &n, d)); for (GrB_Index k = 0; k < n; k++) { w [k] = W[k].w ; h [k] = W[k].h ; pi[k] = W[k].pi; } GrB_TRY (GrB_Vector_new(pd_output, GrB_FP64, n)); GrB_TRY (GrB_Vector_new(ppi_output, GrB_UINT64, n)); GrB_TRY (GrB_Vector_new(ph_output, GrB_UINT64, n)); GrB_TRY (GrB_Vector_build (*pd_output , I, w , n, GrB_MIN_FP64 )); GrB_TRY (GrB_Vector_build (*ppi_output, I, pi, n, GrB_MIN_UINT64)); GrB_TRY (GrB_Vector_build (*ph_output , I, h , n, GrB_MIN_UINT64)); LAGraph_FREE_WORK; return (GrB_SUCCESS) ; }
main.c
#include "common.h"

// Print usage and terminate (END is a project macro — presumably exits;
// TODO confirm against common.h).
static void print_help(char *argv)
{
  END("%s [-f edge_file] [-W width] [-H height] [-D degree] [-R length] [-o output_file] [-s random_seed]\
 [-n calculations] [-w max_temperature] [-c min_temperature] [-g groups] [-C cooling_cycle] [-B] [-d]\
 [-F fixed_temperature] [-Y] [-M] [-h]\n", argv);
}

// Parse the command line with getopt and store each option into the
// caller-provided out-parameters.  Invalid values abort via ERROR.
static void set_args(const int argc, char **argv, char *infname, int *low_length, char *outfname,
                     int *random_seed, long long *ncalcs, double *max_temp, double *min_temp,
                     int *groups, int *cooling_cycle, bool *enable_hill_climbing, bool *enable_detect_temp,
                     bool *enable_bfs, bool *enable_halfway, double *fixed_temp,
                     int *width, int *height, int *max_degree, bool *enable_random)
{
  // at least one option (plus its argument) is required
  if(argc < 3)
    print_help(argv[0]);

  int result;
  while((result = getopt(argc,argv,"f:W:H:D:R:o:s:n:w:c:g:C:BdF:YMhr"))!=-1){
    switch(result){
    case 'r':  // randomize the initial edge list
      *enable_random = true;
      break;
    case 'f':  // input edge file
      if(strlen(optarg) > MAX_FILENAME_LENGTH)
        ERROR("Input filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg);
      strcpy(infname, optarg);
      break;
    case 'W':  // lattice width
      *width = atoi(optarg);
      if(*width <= 0)
        ERROR("-W value > 0\n");
      break;
    case 'H':  // lattice height
      *height = atoi(optarg);
      if(*height <= 0)
        ERROR("-H value > 0\n");
      break;
    case 'D':  // maximum degree
      *max_degree = atoi(optarg);
      if(*max_degree <= 0)
        ERROR("-D value > 0\n");
      break;
    case 'R':  // maximum edge length
      *low_length = atoi(optarg);
      if(*low_length <= 0)
        ERROR("-R value > 0\n");
      break;
    case 'o':  // output edge file
      if(strlen(optarg) > MAX_FILENAME_LENGTH)
        ERROR("Output filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg);
      strcpy(outfname, optarg);
      break;
    case 's':  // random seed
      *random_seed = atoi(optarg);
      if(*random_seed < 0)
        ERROR("-s value >= 0\n");
      break;
    case 'n':  // number of annealing calculations
      *ncalcs = atoll(optarg);
      if(*ncalcs < 0)
        ERROR("-n value >= 0\n");
      break;
    case 'w':  // maximum (starting) temperature
      *max_temp = atof(optarg);
      if(*max_temp <= 0)
        ERROR("-w value > 0\n");
      break;
    case 'c':  // minimum (final) temperature
      *min_temp = atof(optarg);
      if(*min_temp <= 0)
        ERROR("-c value > 0\n");
      break;
    case 'g':  // symmetry groups: only 1, 2, or 4 supported
      *groups = atoi(optarg);
      if(*groups != 1 && *groups != 2 && *groups != 4)
        ERROR("-g value == 1 or 2 or 4\n");
      break;
    case 'C':  // cooling cycle length
      *cooling_cycle = atoi(optarg);
      if(*cooling_cycle <= 0)
        ERROR("-C value > 0\n");
      break;
    case 'B':  // use BFS-based APSP
      *enable_bfs = true;
      break;
    case 'd':  // temperature-detection mode
      *enable_detect_temp = true;
      break;
    case 'F':  // fixed-temperature annealing
      *fixed_temp = atof(optarg);
      if(*fixed_temp <= 0)
        ERROR("-F value > 0\n");
      break;
    case 'Y':  // plain hill climbing
      *enable_hill_climbing = true;
      break;
    case 'M':  // treat input file as a halfway (base) graph
      *enable_halfway = true;
      break;
    case 'h':
    default:
      print_help(argv[0]);
    }
  }
}

// Count self-loop edges.  The "edge" does not have NO_EDGE.
static int count_loop(const int lines, const int *edge)
{
  int num = 0;
  for(int i=0;i<lines;i++)
    if(edge[i*2] == edge[i*2+1])
      num++;

  return num;
}

// True when the lattice distance between vertices v and w does not exceed
// the allowed edge length.
static bool confirm_dist(const int v, const int w, const int height, const int low_length)
{
  return (DISTANCE(v, w, height) <= low_length);
}

// Randomly pick two distinct edges and swap their endpoints (a 2-opt move),
// retrying until a swap that respects the length limit is found.  Mutates
// edge in place.
static void simple_exchange_edge(const int height, const int low_length, const int lines, int* edge)
{
  while(1){
    int e1, e2, new_e1_v, new_e1_w, new_e2_v, new_e2_w;
    do{
      e1 = getRandom(lines);
      e2 = getRandom(lines);
    } while( e1 == e2 );
    int e1_v = edge[e1*2];
    int e1_w = edge[e1*2+1];
    int e2_v = edge[e2*2];
    int e2_w = edge[e2*2+1];
    // try the two possible endpoint pairings; keep the first legal one
    if(confirm_dist(e1_v, e2_v, height, low_length) && confirm_dist(e1_w, e2_w, height, low_length)){
      new_e1_v = e1_v; new_e1_w = e2_v;
      new_e2_v = e1_w; new_e2_w = e2_w;
    }
    else if(confirm_dist(e1_v, e2_w, height, low_length) && confirm_dist(e1_w, e2_v, height, low_length)){
      new_e1_v = e1_v; new_e1_w = e2_w;
      new_e2_v = e1_w; new_e2_w = e2_v;
    }
    else{
      continue;
    }
    edge[2*e1] = new_e1_v;
    edge[2*e1+1] = new_e1_w;
    edge[2*e2] = new_e2_v;
    edge[2*e2+1] = new_e2_w;
    break;
  }
}

#ifdef _OPENMP
// One parallel BFS frontier expansion: visit all unvisited neighbors of
// "frontier", append them to "next", and return the new frontier size.
// NOTE(review): local_frontier is a per-thread VLA of size "nodes" — large
// graphs may exhaust thread stacks; confirm limits with the build settings.
static int top_down_step(const int nodes, const int num_frontier, const int max_degree,
                         const int* degree, const int* restrict adjacency,
                         int* restrict frontier, int* restrict next, char* restrict bitmap)
{
  int count = 0;
  int local_frontier[nodes];
#pragma omp parallel private(local_frontier)
  {
    int local_count = 0;
#pragma omp for nowait
    for(int i=0;i<num_frontier;i++){
      int v = frontier[i];
      for(int j=0;j<degree[v];j++){
        int n = *(adjacency + v * max_degree + j); // adjacency[v][j];
        if(bitmap[n] == NOT_VISITED){
          bitmap[n] = VISITED;
          local_frontier[local_count++] = n;
        }
      }
    } // end for i
#pragma omp critical
    {
      // merge this thread's discoveries into the shared "next" frontier
      memcpy(&next[count], local_frontier, local_count*sizeof(int));
      count += local_count;
    }
  }
  return count;
}
#else
// Serial BFS frontier expansion (same contract as the OpenMP version).
static int top_down_step(const int nodes, const int num_frontier, const int max_degree,
                         const int *degree, const int* restrict adjacency,
                         int* restrict frontier, int* restrict next, char* restrict bitmap)
{
  int count = 0;
  for(int i=0;i<num_frontier;i++){
    int v = frontier[i];
    for(int j=0;j<degree[v];j++){
      int n = *(adjacency + v * max_degree + j); // int n = adjacency[v][j];
      if(bitmap[n] == NOT_VISITED){
        bitmap[n] = VISITED;
        next[count++] = n;
      }
    }
  }
  return count;
}
#endif

// BFS from vertex 0; returns the number of vertices NOT reached, i.e. 0
// iff the graph is connected.
static int simple_bfs(const int nodes, const int max_degree, const int *degree, int *adjacency)
{
  char *bitmap  = malloc(sizeof(char) * nodes);
  int *frontier = malloc(sizeof(int)  * nodes);
  int *next     = malloc(sizeof(int)  * nodes);
  int num_frontier = 1, root = 0, num = 0;

  for(int i=0;i<nodes;i++)
    bitmap[i] = NOT_VISITED;

  frontier[0] = root;
  bitmap[root] = VISITED;

  while(1){
    num_frontier = top_down_step(nodes, num_frontier, max_degree, degree,
                                 adjacency, frontier, next, bitmap);
    if(num_frontier == 0) break;

    // swap frontier and next for the following level
    int *tmp = frontier;
    frontier = next;
    next = tmp;
  }

  for(int i=0;i<nodes;i++)
    if(bitmap[i] == NOT_VISITED)
      num++;

  free(bitmap);
  free(frontier);
  free(next);

  return num;
}

// Inherited from
// http://research.nii.ac.jp/graphgolf/c/create-lattice.c
// Build an initial regular lattice graph, randomize it, then repair it into
// a connected, loop-free graph by repeated random edge exchanges.
static void create_lattice(const int nodes, const int lines, const int width, const int height,
                           const int max_degree, int *degree, const int low_length, int edge[lines*2])
{
  int i = 0;
  // pair adjacent columns: connect column 2x to column 2x+1, max_degree times
  for(int x=0;x<width/2;x++){
    for(int y=0;y<height;y++){
      for(int k=0;k<max_degree;k++){
        edge[i*2]   = y + 2 * x * height;
        edge[i*2+1] = edge[2*i] + height;
        i++;
      }
    }
  }

  // odd width: pair vertices vertically inside the last column
  if(width%2 == 1){
    for(int y=0;y<height/2;y++){
      for(int k=0;k<max_degree;k++){
        edge[i*2]   = (width - 1) * height + 2 * y;
        edge[i*2+1] = edge[i*2] + 1;
        i++;
      }
    }
    /* add self-loop */
    if(height%2 == 1){
      for(int k=0;k<max_degree/2;k++){
        edge[i*2] = edge[i*2+1] = nodes - 1;
        i++;
      }
    }
  }

  for(int i=0;i<lines*INITIAL_TIMES;i++) // Give randomness
    simple_exchange_edge(height, low_length, lines, edge);

  // Make an unconnected graph a connected graph
  // Note that the connected graph after this operation may have loops.
  int (*adjacency)[max_degree] = malloc(sizeof(int)*nodes*max_degree); // int adjacency[nodes][max_degree];
  create_adjacency(nodes, lines, max_degree, (const int (*)[2])edge, adjacency, degree);
  int min_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency);
  int *tmp_edge = malloc(lines*2*sizeof(int));
  while(1){
    // try a random exchange; keep it when it does not hurt connectivity
    memcpy(tmp_edge, edge, sizeof(int)*lines*2);
    simple_exchange_edge(height, low_length, lines, tmp_edge);
    create_adjacency(nodes, lines, max_degree, (const int (*)[2])tmp_edge, adjacency, degree);
    int tmp_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency);
    if(tmp_num == 0){
      memcpy(edge, tmp_edge, sizeof(int)*lines*2);
      break;
    }
    else{
      if(tmp_num <= min_num){
        min_num = tmp_num;
        memcpy(edge, tmp_edge, sizeof(int)*lines*2);
      }
    }
  }

  // Remove loops
  min_num = count_loop(lines, edge);
  if(min_num != 0){
    while(1){
      memcpy(tmp_edge, edge, sizeof(int)*lines*2);
      simple_exchange_edge(height, low_length, lines, tmp_edge);
      int tmp_num = count_loop(lines, tmp_edge);
      if(tmp_num == 0){
        memcpy(edge, tmp_edge, sizeof(int)*lines*2);
        break;
      }
      else{
        if(tmp_num <= min_num){
          min_num = tmp_num;
          memcpy(edge, tmp_edge, sizeof(int)*lines*2);
        }
      }
    }
  }
  free(tmp_edge);
  free(adjacency);

  //  for(int i=0;i<lines;i++)
  //    printf("%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height),
  //           WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height));
  //EXIT(0);
}

// Count '\n' characters in a file (number of edge lines); aborts when the
// file cannot be opened.
static int count_lines(const char *fname)
{
  FILE *fp = NULL;
  if((fp = fopen(fname, "r")) == NULL)
    ERROR("File not found\n");

  int lines = 0, c;
  while((c = fgetc(fp)) != EOF)
    if(c == '\n')
      lines++;

  fclose(fp);
  return lines;
}

// Read an edge file of "x1,y1 x2,y2" pairs.  First pass finds the lattice
// extent (*w, *h); second pass linearizes coordinates into vertex ids
// (x * height + y) stored in "edge".
static void read_file_lattice(int *edge, int *w, int *h, const char *fname)
{
  FILE *fp;
  if((fp = fopen(fname, "r")) == NULL){
    PRINT_R0("File not found\n");
    EXIT(1);
  }

  int n[4];
  *w = 0;
  *h = 0;
  while(fscanf(fp, "%d,%d %d,%d", &n[0], &n[1], &n[2], &n[3]) != EOF){
    *w = MAX(*w, n[0]);
    *h = MAX(*h, n[1]);
    *w = MAX(*w, n[2]);
    *h = MAX(*h, n[3]);
  }
  *w += 1;  // coordinates are 0-based, so extent = max + 1
  *h += 1;
  rewind(fp);

  int i = 0;
  while(fscanf(fp, "%d,%d %d,%d", &n[0], &n[1], &n[2], &n[3]) != EOF){
    edge[i*2  ] = n[0] * (*h) + n[1];
    edge[i*2+1] = n[2] * (*h) + n[3];
    i++;
  }

  fclose(fp);
}

// Largest vertex id appearing in the edge list.
static int max_node_num(const int lines, const int edge[lines*2])
{
  int max = edge[0];
  for(int i=1;i<lines*2;i++)
    max = MAX(max, edge[i]);

  return max;
}

// Tally the degree of every vertex from the edge list.
static void count_degree(const int nodes, const int lines, const int edge[lines*2], int degree[nodes])
{
  for(int i=0;i<nodes;i++)
    degree[i] = 0;

  for(int i=0;i<lines*2;i++)
    degree[edge[i]]++;
}

// Abort unless every edge respects the length limit and every vertex degree
// is within max_degree.
static void verify_graph(const int nodes, const int lines, const int edge[lines*2],
                         const int height, const int low_length, const int max_degree)
{
  PRINT_R0("Verifing a regular graph... ");

  for(int i=0;i<lines;i++)
    if(DISTANCE(edge[i*2], edge[i*2+1], height) > low_length)
      ERROR("Over length in line %d: length = %d, distance = %d\n",
            i+1, low_length, DISTANCE(edge[i*2], edge[i*2+1], height));

  int degree[nodes];
  count_degree(nodes, lines, edge, degree);
  for(int i=0;i<nodes;i++)
    if(degree[i] > max_degree)
      ERROR("Degree is over %d\n", degree[i]);

  PRINT_R0("OK\n");
}

// Replicate the base edge list into a symmetric graph (1, 2, or 4 rotated
// copies), randomize it, then repair connectivity by random exchanges.
static void create_symmetric_edge(int *edge, const int based_nodes, const int based_lines,
                                  const int groups, const int max_degree, int *degree,
                                  const int nodes, const int lines, const int height, const int width,
                                  const int based_height, const int low_length)
{
  // re-linearize base vertex ids from the base lattice to the full lattice
  for(int i=0;i<based_lines;i++)
    for(int j=0;j<2;j++)
      edge[i*2+j] = WIDTH(edge[i*2+j], based_height) * height + HEIGHT(edge[i*2+j], based_height);

  if(groups == 2){
    for(int i=0;i<based_lines;i++)
      for(int j=0;j<2;j++)
        edge[(based_lines+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180);
  }
  else if(groups == 4){
    for(int i=0;i<based_lines;i++){
      for(int j=0;j<2;j++){
        edge[(based_lines  +i)*2+j] = ROTATE(edge[i*2+j], height, width, groups,  90);
        edge[(based_lines*2+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180);
        edge[(based_lines*3+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 270);
      }
    }
  }

  int *tmp_edge = malloc(lines*2*sizeof(int));
  int (*adjacency)[max_degree] = malloc(sizeof(int)*nodes*max_degree); // int adjacency[nodes][max_degree];
  for(int i=0;i<lines*INITIAL_TIMES;i++) // Give randomness
    exchange_edge(nodes, lines, (int (*)[2])edge, height, width, groups, low_length, 0);
  create_adjacency(nodes, lines, max_degree, (const int (*)[2])edge, adjacency, degree);
  int min_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency);
  if(min_num != 0){
    while(1){
      memcpy(tmp_edge, edge, sizeof(int)*lines*2);
      exchange_edge(nodes, lines, (int (*)[2])tmp_edge, height, width, groups, low_length, 0);
      create_adjacency(nodes, lines, max_degree, (const int (*)[2])tmp_edge, adjacency, degree);
      int tmp_num = simple_bfs(nodes,
max_degree, degree, (int *)adjacency); if(tmp_num == 0){ memcpy(edge, tmp_edge, sizeof(int)*lines*2); break; } else{ if(tmp_num <= min_num){ min_num = tmp_num; memcpy(edge, tmp_edge, sizeof(int)*lines*2); } } } } free(tmp_edge); free(adjacency); } static int dist(const int x1, const int y1, const int x2, const int y2) { return(abs(x1 - x2) + abs(y1 - y2)); } static void lower_bound_of_diam_aspl(int *low_diam, double *low_ASPL, const int m, const int n, const int max_degree, const int length) { int moore[m*n], hist[m*n], mh[m*n]; int mn = m * n, current = max_degree, ii; double sum = 0; moore[0] = 1; moore[1] = max_degree + 1; for(ii=2;;ii++){ current = current * (max_degree - 1); moore[ii] = moore[ii-1] + current; if(moore[ii] >= mn){ moore[ii] = mn; break; } } int maxhop = MAX((m+n-2+(length-1))/length, ii); for(int i=ii+1;i<=maxhop;i++) moore[i] = mn; for(int i=0;i<m;i++){ for(int j=0;j<n;j++){ for(int k=0;k<=maxhop;k++) hist[k] = 0; for (int i2=0;i2<m;i2++) for(int j2=0;j2<n;j2++) hist[(dist(i,j,i2,j2)+length-1)/length]++; for(int k=1;k<=maxhop;k++) hist[k] += hist[k-1]; for(int k=0;k<=maxhop;k++) mh[k] = MIN(hist[k], moore[k]); for(int k=1;k<=maxhop;k++) sum += (double)(mh[k] - mh[k-1]) * k; } } int dboth = 0; for(dboth=0;;dboth++) if(mh[dboth] == mn) break; *low_diam = dboth; *low_ASPL = sum/((double)mn*(mn-1)); } static void output_params(const int max_degree, const int groups, const int low_length, const int random_seed, const double max_temp, const double min_temp, const long long ncalcs, const int cooling_cycle, const double cooling_rate, const char *infname, const char *outfname, const double average_time, const bool enable_hill_climbing, const int width, const int height, const bool enable_bfs, const bool enable_fixed_temp, const double fixed_temp) { #ifdef NDEBUG PRINT_R0("NO DEBUG MODE\n"); #else PRINT_R0("DEBUG MODE\n"); #endif PRINT_R0("Seed : %d\n", random_seed); PRINT_R0("Processes: %d\n", procs); #ifdef _OPENMP PRINT_R0("Threads : %d\n", 
omp_get_max_threads()); #endif if(enable_bfs) PRINT_R0("APSP : BFS\n"); else PRINT_R0("APSP : MATRIX Opetation\n"); if(enable_hill_climbing) PRINT_R0("Algorithm: Hill climbing Method\n"); else{ if(enable_fixed_temp) PRINT_R0("Algorithm: Fixed Temperature Simulated Annealing : %f\n", fixed_temp); else PRINT_R0("Algorithm: Simulated Annealing\n"); PRINT_R0(" MAX Temperature: %f\n", max_temp); PRINT_R0(" MIN Temperature: %f\n", min_temp); PRINT_R0(" Cooling Cycle: %d\n", cooling_cycle); PRINT_R0(" Cooling Rate : %f\n", cooling_rate); } if(groups != 1) PRINT_R0(" Groups : %d\n", groups); PRINT_R0("Num. of Calulations: %lld\n", ncalcs); PRINT_R0(" Average APSP time : %f sec.\n", average_time); PRINT_R0(" Estimated elapse time: %f sec.\n", average_time * ncalcs); if(infname[0] != NOT_C_DEFINED) PRINT_R0("Input filename: %s\n", infname); PRINT_R0(" (w x h, d, r) = (%d x %d, %d, %d)\n", width, height, max_degree, low_length); if(outfname[0] != NOT_C_DEFINED) PRINT_R0("Output filename: %s\n", outfname); PRINT_R0("---\n"); } static void output_file(FILE *fp, const int lines, const int height, const int edge[lines*2]) { for(int i=0;i<lines;i++) fprintf(fp, "%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height), WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height)); } int main(int argc, char *argv[]) { bool enable_hill_climbing = false, enable_detect_temp = false, enable_bfs = false, enable_halfway = false; bool enable_random = false; char hostname[MPI_MAX_PROCESSOR_NAME]; char infname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED}, outfname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED}; int random_seed = 0, cooling_cycle = 1, groups = 1; int namelen, based_lines, lines, based_width, based_height, based_nodes, nodes; int diam = NOT_N_DEFINED, max_degree = NOT_N_DEFINED, low_diam = NOT_N_DEFINED; int width = NOT_N_DEFINED, height = NOT_N_DEFINED, low_length = NOT_N_DEFINED; long long ncalcs = DEFAULT_NCALCS, num_accepts = 0; double ASPL = NOT_N_DEFINED, low_ASPL = 
NOT_N_DEFINED, cooling_rate = NOT_N_DEFINED, max_diff_energy = NOT_N_DEFINED; double max_temp = NOT_N_DEFINED, min_temp = NOT_N_DEFINED, fixed_temp = NOT_N_DEFINED; int *edge = NULL, *degree = NULL; FILE *fp = NULL; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &procs); MPI_Get_processor_name(hostname, &namelen); PRINT_R0("Run on %s\n", hostname); time_t t = time(NULL); PRINT_R0("%s---\n", ctime(&t)); // Set arguments set_args(argc, argv, infname, &low_length, outfname, &random_seed, &ncalcs, &max_temp, &min_temp, &groups, &cooling_cycle, &enable_hill_climbing, &enable_detect_temp, &enable_bfs, &enable_halfway, &fixed_temp, &width, &height, &max_degree, &enable_random); // Set other arguments bool enable_max_temp = (max_temp != NOT_N_DEFINED); bool enable_min_temp = (min_temp != NOT_N_DEFINED); bool enable_fixed_temp = (fixed_temp != NOT_N_DEFINED); bool enable_infname = (infname[0] != NOT_C_DEFINED); bool enable_outfname = (outfname[0] != NOT_C_DEFINED); bool enable_whd = (width != NOT_N_DEFINED && height != NOT_N_DEFINED && max_degree != NOT_N_DEFINED); // Check arguments if(low_length == NOT_N_DEFINED) ERROR("Must need -R\n"); if(enable_hill_climbing && enable_max_temp) ERROR("Both -Y and -w cannot be used.\n"); if(enable_hill_climbing && enable_min_temp) ERROR("Both -Y and -c cannot be used.\n"); if(enable_hill_climbing && enable_detect_temp) ERROR("Both -Y and -d cannot be used.\n"); if(!enable_infname && !enable_whd) ERROR("Must set -f or \"-W and -H and -D\"\n"); if(enable_halfway && !enable_infname) ERROR("Must set both -M and -f\n"); if(!enable_max_temp) max_temp = 100.0; if(!enable_min_temp) min_temp = 0.217147; if(max_temp == min_temp) ERROR("The same values in -w and -c.\n"); if(enable_detect_temp) ncalcs = DEFAULT_DETECT_NCALS; srandom(random_seed); if(enable_infname){ based_lines = count_lines(infname); lines = (enable_halfway)? 
based_lines : based_lines * groups; edge = malloc(sizeof(int)*lines*2); // int edge[lines][2]; read_file_lattice(edge, &based_width, &based_height, infname); based_nodes = max_node_num(based_lines, (int *)edge) + 1; if(enable_halfway){ based_nodes /= groups; based_lines /= groups; if(groups == 2){ based_height /= 2; } else if(groups == 4){ based_width /= 2; based_height /= 2; } } if(groups == 1){ height = based_height; width = based_width; } else if(groups == 2){ height = based_height * 2; width = based_width; } else{ // groups == 4 height = based_height * 2; width = based_width * 2; } nodes = based_nodes * groups; degree = malloc(nodes*sizeof(int)); if(max_degree == NOT_N_DEFINED){ for(int i=0;i<nodes;i++) degree[i] = 0; for(int i=0;i<lines*2;i++) degree[edge[i]]++; max_degree = 0; for(int i=0;i<nodes;i++) if(degree[i] > max_degree) max_degree = degree[i]; } if(enable_random) for(int i=0;i<lines*INITIAL_TIMES;i++) exchange_edge(nodes, lines, (int (*)[2])edge, height, width, groups, low_length, 0); } else{ ERROR("NOT implement yet\n"); nodes = width * height; based_nodes = nodes / groups; lines = nodes * max_degree / 2; based_lines = lines / groups; edge = malloc(sizeof(int)*lines*2); degree = malloc(sizeof(int)*nodes); // int degree[nodes]; if(groups == 1){ based_width = width; based_height = height; } else if(groups == 2){ based_width = width; based_height = height/2; } else{ // groups == 4 based_width = width/2; based_height = height/2; } } if(groups == 4 && (based_width != based_height)) ERROR("When g = 4, width(%d) must be equal to height(%d).\n", based_width, based_height); else if(groups == 4 && width%2 != 0 && height%2 != 0) ERROR("When g = 4, width(%d) and height(%d) must be divisible by 2.\n", width, height); else if(groups == 2 && height%2 != 0) ERROR("When g = 2, height(%d) must be divisible by 2.\n", height); else if(nodes%groups != 0) ERROR("nodes(%d) must be divisible by groups(%d)\n", nodes, groups); else if(lines%groups != 0) 
ERROR("(nodes*max_degree/2) must be divisible by groups(%d)\n", groups); else if(based_width*based_height != based_nodes) ERROR("Not grid graph (width %d x height %d != nodes %d).\n", based_width, based_height, based_nodes); if(!enable_infname) create_lattice(based_nodes, based_lines, based_width, based_height, max_degree, degree, low_length, edge); int *rotate_hash = malloc(nodes * sizeof(int)); create_rotate_hash(nodes, height, width, groups, rotate_hash); if(!enable_halfway && groups != 1) create_symmetric_edge(edge, based_nodes, based_lines, groups, max_degree, degree, nodes, lines, height, width, based_height, low_length); verify_graph(nodes, lines, edge, height, low_length, max_degree); lower_bound_of_diam_aspl(&low_diam, &low_ASPL, width, height, max_degree, low_length); check_current_edge(nodes, lines, max_degree, edge, low_ASPL, low_diam, groups, height, based_height, enable_bfs, rotate_hash, degree); double average_time = estimated_elapse_time(nodes, lines, max_degree, edge, height, width, based_height, groups, low_length, enable_bfs, rotate_hash, degree); if(enable_hill_climbing){ fixed_temp = max_temp = min_temp = 0.0; cooling_rate = 1.0; } else{ cooling_rate = pow(min_temp/max_temp, (double)cooling_cycle/ncalcs); } if(enable_outfname && rank == 0){ struct stat stat_buf; if(stat(outfname, &stat_buf) == 0) ERROR("Output file %s exsits. 
\n", outfname); if((fp = fopen(outfname, "w")) == NULL) ERROR("Cannot open %s\n", outfname); } output_params(max_degree, groups, low_length, random_seed, max_temp, min_temp, ncalcs, cooling_cycle, cooling_rate, infname, outfname, average_time, enable_hill_climbing, width, height, enable_bfs, enable_fixed_temp, fixed_temp); // Optimization timer_clear_all(); timer_start(TIMER_SA); long long step = sa(nodes, lines, max_degree, degree, based_nodes, ncalcs, cooling_rate, low_diam, low_ASPL, enable_bfs, enable_hill_climbing, enable_detect_temp, &max_diff_energy, max_temp, min_temp, fixed_temp, edge, &diam, &ASPL, cooling_cycle, &num_accepts, width, based_width, height, based_height, low_length, groups, rotate_hash, enable_fixed_temp); timer_stop(TIMER_SA); if(enable_detect_temp){ // Set max temperature to accept it 50% in maximum diff energy. PRINT_R0("Proposed max temperature is %f\n", (-1.0 * max_diff_energy) / log(0.5)); // Set min temperature to accept it 0.01% in minimum diff energy. END("Proposed min temperature is %f\n", (-2.0) / log(0.0001)); } // Output results PRINT_R0("---\n"); PRINT_R0("Diam. k = %d ASPL l = %f Diam. gap = %d ASPL gap = %f\n", diam, ASPL, diam-low_diam, ASPL-low_ASPL); double time_sa = timer_read(TIMER_SA); double time_apsp = timer_read(TIMER_APSP); double time_check = timer_read(TIMER_CHECK); PRINT_R0("Steps: %lld Elapse time: %f sec. (APSP: %f sec. Check: %f sec. Other: %f sec.)\n", step, time_sa, time_apsp, time_check, time_sa-(time_apsp+time_check)); if(ncalcs > SKIP_ACCEPTS) PRINT_R0("Accept rate: %f (= %lld/%lld)\n", (double)num_accepts/(ncalcs-SKIP_ACCEPTS), num_accepts, ncalcs-SKIP_ACCEPTS); if(rank == 0 && enable_outfname){ output_file(fp, lines, height, edge); fclose(fp); } verify_graph(nodes, lines, edge, height, low_length, max_degree); MPI_Finalize(); free(edge); free(degree); free(rotate_hash); return 0; }
tictoc.c
/* Copyright (c) 2012 by Marcin Krotkiewski, University of Oslo
   See ../License.txt for License Agreement. */

/*
 * Simple tic/toc wall-clock timing plus flop/byte counters.
 * All counters are file-local; with OpenMP they are threadprivate
 * (except on APPLE, where the pragma is skipped).
 */

#include "tictoc.h"
#include "debug_defs.h"

static double flops   = 0;   /* floating-point operations accumulated since _tic() */
static double bytes   = 0;   /* memory traffic (bytes) accumulated since _tic()    */
static long   time_us = 0;   /* total accumulated time in microseconds (_inctime)  */

#ifdef WINDOWS
#else
static struct timeval tb, te;   /* tb = last _tic(), te = scratch for toc-style reads */
#ifndef APPLE
#ifdef USE_OPENMP
#pragma omp threadprivate(tb, te, flops, bytes)
#endif /* USE_OPENMP */
#endif /* APPLE */
#endif

#ifdef MATLAB_MEX_FILE
#include "mex.h"
#endif

/* Reset all statistics counters.
 * FIX: previously `bytes` was not reset here, inconsistent with _tic(),
 * which zeroes flops AND bytes; a stats_zero() between runs left stale
 * byte counts in the MB/s report. */
void stats_zero(void)
{
  flops = 0;
  bytes = 0;
  time_us = 0;
}

/* Accumulate nflops floating-point operations into the counter. */
void flops_add(double nflops)
{
  flops += nflops;
}

/* Accumulate nbytes of memory traffic into the counter. */
void bytes_add(double nbytes)
{
  bytes += nbytes;
}

/* Read back the accumulated flop counter. */
double flops_get()
{
  return flops;
}

/* Read back the accumulated byte counter. */
double bytes_get()
{
  return bytes;
}

#ifdef WINDOWS
/* Stubs: timing is not implemented on Windows.
 * FIX: elapsed_time() previously fell off the end of a non-void function
 * (undefined behavior if the caller uses the value); return 0 instead.
 * NOTE(review): _nntoc() exists only in this branch — presumably declared
 * in tictoc.h; confirm whether a POSIX definition is missing. */
double elapsed_time(){ return 0; }
void _tic(){}
void _toc(){}
void _midtoc(){}
void _ntoc(const char *idtxt){}
void _nntoc(){}
void _inctime(){}
void stats_print(){}
#else

/* Seconds elapsed since the last _tic() (does not print). */
double elapsed_time()
{
  long s,u; double tt;
  gettimeofday(&te, NULL);
  s=te.tv_sec-tb.tv_sec;
  u=te.tv_usec-tb.tv_usec;
  tt=((double)s)*1000000+u;
  return tt/1e6;
}

/* Start the timer and clear the per-interval flop/byte counters.
 * Under OpenMP only the master thread does this. */
void _tic()
{
#ifdef USE_OPENMP
#pragma omp master
#endif
  {
    gettimeofday(&tb, NULL);
    flops=0;
    bytes=0;
    fflush(stdout);
  }
}

/* Stop the timer and report time, MFLOP/s, MB/s and the raw totals.
 * tt is in microseconds, so flops/tt and bytes/tt are MFLOP/s and MB/s. */
void _toc()
{
#ifdef USE_OPENMP
#pragma omp master
#endif
  {
    long s,u; double tt;
    gettimeofday(&te, NULL);
    s=te.tv_sec-tb.tv_sec;
    u=te.tv_usec-tb.tv_usec;
    tt=((double)s)*1000000+u;
    VERBOSE("time: %li.%.6lis", DEBUG_BASIC, (s*1000000+u)/1000000, (s*1000000+u)%1000000);
    VERBOSE("MFLOP/s: %.3lf", DEBUG_BASIC, flops/tt);
    VERBOSE("MB/s: %.3lf", DEBUG_BASIC, bytes/tt);
    VERBOSE("total fp operations: %.0lf", DEBUG_BASIC, flops);
    VERBOSE("total memory traffic %.0lf", DEBUG_BASIC, bytes);
    fflush(stdout);
  }
}

/* Intermediate report: time and MFLOP/s only; the timer keeps running. */
void _midtoc()
{
#ifdef USE_OPENMP
#pragma omp master
#endif
  {
    long s,u; double tt;
    gettimeofday(&te, NULL);
    s=te.tv_sec-tb.tv_sec;
    u=te.tv_usec-tb.tv_usec;
    tt=((double)s)*1000000+u;
    VERBOSE("time: %li.%.6lis", DEBUG_BASIC, (s*1000000+u)/1000000, (s*1000000+u)%1000000);
    VERBOSE("MFLOP/s: %.3lf\n", DEBUG_BASIC, flops/tt);
    fflush(stdout);
  }
}

/* Named report: print elapsed time labelled with idtxt (or "time:" if NULL). */
void _ntoc(const char *idtxt)
{
#ifdef USE_OPENMP
#pragma omp master
#endif
  {
    long s,u;
    gettimeofday(&te, NULL);
    s=te.tv_sec-tb.tv_sec;
    u=te.tv_usec-tb.tv_usec;
    if(idtxt){
      VERBOSE("%-30s%10li.%.6lis", DEBUG_BASIC, idtxt, (s*1000000+u)/1000000, (s*1000000+u)%1000000);
    } else {
      VERBOSE("time:%10li.%.6lis", DEBUG_BASIC, (s*1000000+u)/1000000, (s*1000000+u)%1000000);
    }
    fflush(stdout);
  }
}

/* Add the interval since the last _tic() into the running total time_us. */
void _inctime()
{
  gettimeofday(&te, NULL);
  time_us += (te.tv_sec-tb.tv_sec)*1000000 + (te.tv_usec-tb.tv_usec);
}

/* Print the accumulated total time.
 * FIX: the microsecond field used "%6li" (space-padded), printing e.g.
 * "1.   500s" instead of "1.000500s"; use "%.6li" to match every other
 * time printout in this file. */
void stats_print()
{
  VERBOSE("total time %li.%.6lis", DEBUG_BASIC, time_us/1000000, time_us%1000000);
}

#endif /* WINDOWS */
GB_binop__islt_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__islt_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__islt_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__islt_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_fp32) // A*D function (colscale): GB (_AxD__islt_fp32) // D*A function (rowscale): GB (_DxB__islt_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__islt_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__islt_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_fp32) // C=scalar+B GB (_bind1st__islt_fp32) // C=scalar+B' GB (_bind1st_tran__islt_fp32) // C=A+scalar GB (_bind2nd__islt_fp32) // C=A'+scalar GB (_bind2nd_tran__islt_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_FP32 || GxB_NO_ISLT_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__islt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = 
(float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__islt_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix 
M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__islt_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; 
p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
multisort-omp-depend.c
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h>

/* Wall-clock time in microseconds. */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
        stamp = stamp/1e6;\
        printf ("%s: %0.6f\n",(_m), stamp);

// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;

#define BLOCK_SIZE 1024L

#define T int

void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);

/* Recursively merge left[] and right[] (each n elements) into
 * result[start .. start+length); below MIN_MERGE_SIZE*2 fall back to
 * the sequential basicmerge. The two half-merges write disjoint ranges
 * of result, so they run as independent tasks joined by taskwait. */
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length) {
        if (length < MIN_MERGE_SIZE*2L) {
                // Base case
                basicmerge(n, left, right, result, start, length);
        } else {
                // Recursive decomposition
                #pragma omp task
                merge(n, left, right, result, start, length/2);
                #pragma omp task
                merge(n, left, right, result, start + length/2, length/2);
                #pragma omp taskwait
        }
}

/* Sort data[0..n) using tmp[0..n) as scratch: sort the four quarters as
 * tasks, merge quarter pairs into tmp, then merge the halves back into
 * data. depend clauses name a representative first element of each
 * quarter/half to order the merge tasks after the sorts. */
void multisort(long n, T data[n], T tmp[n]) {
        if (n >= MIN_SORT_SIZE*4L) {
                // Recursive decomposition
                #pragma omp task depend(out: data[0])
                multisort(n/4L, &data[0], &tmp[0]);
                #pragma omp task depend(out: data[n/4L])
                multisort(n/4L, &data[n/4L], &tmp[n/4L]);
                #pragma omp task depend(out: data[n/2L])
                multisort(n/4L, &data[n/2L], &tmp[n/2L]);
                #pragma omp task depend(out: data[3L*n/4L])
                multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L]);

                #pragma omp task depend(in: data[0], data[n/4L]) depend(out: tmp[0])
                merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L);
                #pragma omp task depend(in: data[n/2L], data[3L*n/4L]) depend(out: tmp[n/2L])
                merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L);

                #pragma omp task depend(in: tmp[0], tmp[n/2L])
                merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n);
                #pragma omp taskwait
        } else {
                // Base case
                basicsort(n, data);
        }
}

/* Fill data[] with a deterministic pseudo-random sequence seeded by rand(). */
static void initialize(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                if (i==0) {
                        data[i] = rand();
                } else {
                        data[i] = ((data[i-1]+1) * i * 104723L) % N;
                }
        }
}

/* Zero-fill data[]. */
static void clear(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                data[i] = 0;
        }
}

/* Count and report out-of-order adjacent pairs.
 * FIX: the loop index was `int` against a `long` bound, which overflows
 * (and can loop forever) when n >= 2^31; use long. */
void check_sorted(long n, T data[n]) {
        int unsorted=0;
        for (long i=1; i<n; i++)
                if (data[i-1] > data[i]) unsorted++;
        if (unsorted > 0)
                printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
        else {
                // printf ("data IS ordered; ");
        }
}

int main(int argc, char **argv) {
        if (argc != 4) {
                fprintf(stderr, "Usage: %s <vector size in K> <sort size in K> <merge size in K>\n", argv[0]);
                return 1;
        }

        N = atol(argv[1]) * BLOCK_SIZE;
        MIN_SORT_SIZE = atol(argv[2]) * BLOCK_SIZE;
        MIN_MERGE_SIZE = atol(argv[3]) * BLOCK_SIZE;

        T *data = malloc(N*sizeof(T));
        T *tmp = malloc(N*sizeof(T));
        // FIX: allocations were used unchecked; fail cleanly instead.
        if (data == NULL || tmp == NULL) {
                fprintf(stderr, "Error: cannot allocate two buffers of %ld elements\n", N);
                free(data);
                free(tmp);
                return 1;
        }

        double stamp;
        START_COUNT_TIME;
        initialize(N, data);
        clear(N, tmp);
        STOP_COUNT_TIME("Initialization time in seconds");

        START_COUNT_TIME;
        // Single thread creates the task tree; the team executes it.
        #pragma omp parallel
        #pragma omp single
        multisort(N, data, tmp);
        STOP_COUNT_TIME("Multisort execution time");

        START_COUNT_TIME;
        check_sorted (N, data);
        STOP_COUNT_TIME("Check sorted data execution time");

        fprintf(stdout, "Multisort program finished\n");
        // FIX: buffers were leaked at exit.
        free(data);
        free(tmp);
        return 0;
}
graphequivalance.c
#include "graph.h"
#include "mainFunctions.h"
#include "print.h"
#include "powerperformacetracking.h"
#include "communities.h"
#include "graphprop.h"

/* Mapping from a node id in G to its position in the remapped graph. */
typedef struct graphmap {
  node_t newPos;   // position of this node in the reordered graph
  // node_t revPos;
} graphmap;

/* Read a "<old id> <new id>" pair per line from filename into gm.
 * Lines must be sorted by old id starting at 0 (asserted).
 * NOTE(review): fscanf uses %d — assumes node_t is int; confirm. */
void readMap(graph *G, graphmap* gm, const char* filename) {
  int r = 1;
  FILE* f;
  f = fopen(filename, "r");
  // FIX: fopen was unchecked before fscanf; fail loudly instead of crashing.
  assert(f != NULL);
  node_t i,j;
  node_t id = 0;
  while(id < G->numNodes) {
    r = fscanf(f,"%d %d",&i,&j);
    assert(r != EOF);
    assert(i == id);
    gm[id].newPos = j;
    id++;
  }
  fclose(f);
}

/* Assert that newG is G with nodes renamed by gm: every edge (x0,y) of G
 * must appear as (gm[x0], gm[y]) in newG, with equal weight when G is
 * weighted. Each source node's adjacency is scanned in parallel. */
void equivalance(graph *G, graph *newG, graphmap *gm) {
  bool hasEdgeWeight = false;
  if(G->weights != NULL) {
    hasEdgeWeight = true;
  }
  #pragma omp parallel
  {
    node_t x0;
    #pragma omp for schedule(dynamic, 1024)
    for (x0 = 0; x0 < G->numNodes; x0 ++) {
      node_t xNew = gm[x0].newPos;
      for (edge_t y_idx = G->begin[x0];y_idx < G->begin[x0+1] ; y_idx ++) {
        node_t y = G->node_idx [y_idx];
        node_t yNew = gm[y].newPos;
        bool neighbour = false;
        int weight = 0;
        // linear scan of xNew's adjacency in newG for the mapped target
        for(edge_t s = newG->begin[xNew]; s < newG->begin[xNew+1]; s++) {
          node_t dest = newG->node_idx[s];
          if(dest == yNew) {
            neighbour = true;
            if(hasEdgeWeight)
              weight = newG->weights[s];
            break;
          }
        }
        assert(neighbour == true);
        if(hasEdgeWeight)
          assert(weight == G->weights[y_idx]);
      }
    }
  }
}

/***
 * Common entry point for all algorithms.
 * argv: <inputfile1> <graphformat> <inputfile2> <graphmapfile>
 **/
int runalgo(int argc,char** argv) {
  if(argc < 5) {
    const char* argList[4] = {"<inputfile1>" , "<graphformat>","<inputfile2>", "<graphmapfile>"};
    // NOTE(review): printError appears to be a macro (no semicolon in the
    // original); presumably it reports usage via argList and exits — confirm.
    printError()
  }
  graph* G = readGraph(argv[1], argv[2]);
  graph* newG = readGraph(argv[3], argv[2]);
  graphmap* gm = (graphmap*) malloc (G->numNodes * sizeof(graphmap));
  // FIX: allocation was used unchecked.
  assert(gm != NULL);
  readMap(G,gm, argv[4]);
  equivalance(G, newG, gm);
  // FIX: gm was leaked and the int function had no return statement
  // (undefined behavior if the caller uses the value).
  free(gm);
  return 0;
}

inline void kernel(graph *G) {
}
rand.c
/* Copyright 2013. The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013 Martin Uecker <uecker@eecs.berkeley.edu> * 2013 Dara Bahri <dbahri123@gmail.com> */ #define _GNU_SOURCE #include <stdlib.h> #include <math.h> #include <complex.h> #include "num/multind.h" #ifdef USE_CUDA #include "num/gpuops.h" #endif #include "rand.h" unsigned int num_rand_seed = 123; void num_rand_init(unsigned int seed) { num_rand_seed = seed; } double uniform_rand(void) { double ret; #pragma omp critical ret = rand_r(&num_rand_seed) / (double)RAND_MAX; return ret; } /** * Box-Muller */ complex double gaussian_rand(void) { double u1, u2, s; do { u1 = 2. * uniform_rand() - 1.; u2 = 2. * uniform_rand() - 1.; s = u1 * u1 + u2 * u2; } while (s > 1.); double re = sqrt(-2. * log(s) / s) * u1; double im = sqrt(-2. * log(s) / s) * u2; return re + 1.i * im; } void md_gaussian_rand(unsigned int D, const long dims[D], complex float* dst) { #ifdef USE_CUDA if (cuda_ondevice(dst)) { complex float* tmp = md_alloc(D, dims, sizeof(complex float)); md_gaussian_rand(D, dims, tmp); md_copy(D, dims, dst, tmp, sizeof(complex float)); md_free(tmp); return; } #endif //#pragma omp parallel for for (long i = 0; i < md_calc_size(D, dims); i++) dst[i] = (complex float)gaussian_rand(); }
memory_characterization.c
#define _GNU_SOURCE #include <numa.h> #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <time.h> #include <kmeans/kmeans.h> /* diff two timespecs */ long double nanoseconds(struct timespec * start, struct timespec * end) { long double s = (long double) start->tv_nsec; s /= 1e9; s += (long double) start->tv_sec; long double e = (long double) end->tv_nsec; e /= 1e9; e += (long double) end->tv_sec; return e - s; } /* Fisher-Yates shuffle */ size_t * shuffle_array(size_t * array, const size_t size) { for(size_t i = size - 1; i > 0; i--) { const size_t j = rand() % (i + 1); array[i] ^= array[j]; array[j] ^= array[i]; array[i] ^= array[j]; }; return array; } /* sequential read */ long double sequential_read(int * array, const size_t size) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { (void) array[i]; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } /* sequential write */ long double sequential_write(int * array, const size_t size, const int value) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { array[i] = value; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } /* sequential read-write */ long double sequential_read_write(int * lhs, int * rhs, const size_t size) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { lhs[i] += rhs[i]; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } /* random read */ long double random_read(int * array, const size_t size, size_t * order) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { (void) array[order[i]]; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } /* random write */ long double 
random_write(int * array, const size_t size, const int value, size_t * order) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { array[order[i]] = value; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } /* random read-write */ long double random_read_write(int * lhs, int * rhs, const size_t size, size_t * order) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { const size_t j = order[i]; lhs[j] += rhs[j]; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } /* mirror read */ long double mirror_read(int * array, const size_t size) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { (void) array[i]; (void) array[size - 1 - i]; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } /* mirror write */ long double mirror_write(int * array, const size_t size, const int value) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { array[i] = value; array[size - 1 - i] = value; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } /* mirror read write */ long double mirror_read_write(int * array, const size_t size) { struct timespec start, end; clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel for for(size_t i = 0; i < size; i++) { array[i] = array[size - 1 - i]; array[size - 1 - i] = array[i]; } clock_gettime(CLOCK_MONOTONIC, &end); return nanoseconds(&start, &end); } #define rwrw(name) \ long double name##_read; \ long double name##_write; \ long double name##_read_write; \ struct stats { int node; long long size; rwrw(sequential); rwrw(random); rwrw(mirror); long double avg; }; double distance(const Pointer lhs, const Pointer rhs) { struct stats * l = (struct 
stats *) lhs; struct stats * r = (struct stats *) rhs; const long double sdr = l->sequential_read - r->sequential_read; const long double sdw = l->sequential_write - r->sequential_write; const long double sdrw = l->sequential_read_write - r->sequential_read_write; const long double rdr = l->random_read - r->random_read; const long double rdw = l->random_write - r->random_write; const long double rdrw = l->random_read_write - r->random_read_write; const long double mdr = l->mirror_read - r->mirror_read; const long double mdw = l->mirror_write - r->mirror_write; const long double mdrw = l->mirror_read_write - r->mirror_read_write; const long double davg = l->avg - r->avg; return sdr * sdr + sdw * sdw + sdrw * sdrw + rdr * rdr + rdw * rdw + rdrw * rdrw + mdr * mdr + mdw * mdw + mdrw * mdrw + davg * davg + 0; } /* from kmeans/example2.c */ void centroid(const Pointer * objs, const int * clusters, size_t num_objs, int cluster, Pointer centroid) { int num_cluster = 0; struct stats sum; struct stats **pts = (struct stats**)objs; struct stats *center = (struct stats*)centroid; memset(&sum, 0, sizeof(struct stats)); if (num_objs <= 0) return; for (int i = 0; i < num_objs; i++) { /* Only process objects of interest */ if (clusters[i] != cluster) continue; sum.sequential_read += pts[i]->sequential_read; sum.sequential_write += pts[i]->sequential_write; sum.sequential_read_write += pts[i]->sequential_read_write; sum.random_read += pts[i]->random_read; sum.random_write += pts[i]->random_write; sum.random_read_write += pts[i]->random_read_write; sum.mirror_read += pts[i]->mirror_read; sum.mirror_write += pts[i]->mirror_write; sum.mirror_read_write += pts[i]->mirror_read_write; sum.avg += pts[i]->avg; num_cluster++; } if (num_cluster) { sum.sequential_read /= num_cluster; sum.sequential_write /= num_cluster; sum.sequential_read_write /= num_cluster; sum.random_read /= num_cluster; sum.random_write /= num_cluster; sum.random_read_write /= num_cluster; sum.mirror_read /= 
num_cluster; sum.mirror_write /= num_cluster; sum.mirror_read_write /= num_cluster; sum.avg /= num_cluster; *center = sum; } return; } struct stats * run_kernels(int * numa_nodes, const size_t numa_node_count, const size_t size, const size_t iterations) { /* number of elements */ const size_t count = size / sizeof(int); /* an array of indicies */ size_t * order = calloc(count, sizeof(size_t)); for(size_t i = 0; i < count; i++) { order[i] = i; } struct stats * stats = calloc(numa_node_count, sizeof(struct stats)); /* run kernels on each numa node */ for(size_t i = 0; i < numa_node_count; i++) { stats[i].node = numa_nodes[i]; stats[i].size = numa_node_size64(stats[i].node, NULL); /* run each kernel multiple times */ for(size_t j = 0; j < iterations; j++) { int * lhs = numa_alloc_onnode(size, numa_nodes[i]); int * rhs = numa_alloc_onnode(size, numa_nodes[i]); memset(lhs, 0, size); memset(rhs, 0, size); /* /\* change the order indicies are read from *\/ */ /* shuffle_array(order, count); */ /* run each kernel */ stats[i].sequential_read += sequential_read (rhs, count); stats[i].sequential_write += sequential_write (rhs, count, 0); stats[i].sequential_read_write += sequential_read_write(lhs, rhs, count); stats[i].random_read += random_read (rhs, count, shuffle_array(order, count)); stats[i].random_write += random_write (rhs, count, 0, shuffle_array(order, count)); stats[i].random_read_write += random_read_write (lhs, rhs, count, shuffle_array(order, count)); stats[i].mirror_read += mirror_read (rhs, count); stats[i].mirror_write += mirror_write (rhs, count, 0); stats[i].mirror_read_write += mirror_read_write (rhs, count); numa_free(lhs, size); numa_free(rhs, size); } stats[i].avg = (stats[i].sequential_read + stats[i].sequential_write + stats[i].sequential_read_write + stats[i].random_read + stats[i].random_write + stats[i].random_read_write + stats[i].mirror_read + stats[i].mirror_write + stats[i].mirror_read_write) / 6.0; } free(order); return stats; } /* sorts 
backwards; highest time comes first */ int sort_by_avg_of_avg(const void * lhs, const void * rhs) { size_t l_count = 0; long double l_sum = 0; for(struct stats ** l = *(struct stats ***) lhs; *l; l++) { l_sum += (*l)->avg; l_count++; } size_t r_count = 0; long double r_sum = 0; for(struct stats ** r = *(struct stats ***) rhs; *r; r++) { r_sum += (*r)->avg; r_count++; } const long double l_avg = l_sum / l_count; const long double r_avg = r_sum / r_count; if (l_avg < r_avg) { return -1; } else if (l_avg > r_avg) { return 1; } return 0; } int main(int argc, char * argv[]) { srand(time(NULL)); if (argc < 4) { fprintf(stderr, "%s size iterations memory_type [memory_type ...]\n", argv[0]); fprintf(stderr, "\n"); fprintf(stderr, "The order memory types should be listed, if available:\n"); fprintf(stderr, " DRAM HBM GPU OPTANE\n\n"); return 1; } size_t size = 0; if (sscanf(argv[1], "%zu", &size) != 1) { fprintf(stderr, "Bad memory size\n"); return 1; } size_t iterations = 0; if (sscanf(argv[2], "%zu", &iterations) != 1) { fprintf(stderr, "Bad iteration count\n"); return 1; } if (numa_available() == -1) { fprintf(stderr, "could not start libnuma\n"); return 1; } /* find numa nodes */ const int max_numa_nodes = numa_max_node() + 1; size_t numa_node_count = 0; int * numa_nodes = calloc(max_numa_nodes, sizeof(int)); for(int i = 0; i < max_numa_nodes; i++) { if (numa_bitmask_isbitset(numa_all_nodes_ptr, i)) { numa_nodes[numa_node_count++] = i; } } /* get numbers from accessing NUMA nodes */ struct stats * stats = run_kernels(numa_nodes, numa_node_count, size, iterations); #if defined(DEBUG) || defined(KERNEL) for(size_t i = 0; i < numa_node_count; i++) { fprintf(stdout, "%d %Lf %Lf %Lf %Lf %Lf %Lf %Lf %Lf %Lf %Lf\n", stats[i].node, stats[i].sequential_read, stats[i].sequential_write, stats[i].sequential_read_write, stats[i].random_read, stats[i].random_write, stats[i].random_read_write, stats[i].mirror_read, stats[i].mirror_write, stats[i].mirror_read_write, stats[i].avg ); } 
#endif /* cluster the results */ kmeans_config config; config.k = argc - 3; config.num_objs = numa_node_count; config.max_iterations = iterations * 2; config.distance_method = distance; config.centroid_method = centroid; config.objs = calloc(config.num_objs, sizeof(Pointer)); config.centers = calloc(config.k, sizeof(Pointer)); config.clusters = calloc(config.num_objs, sizeof(Pointer)); for(int i = 0; i < config.num_objs; i++) { config.objs[i] = &stats[i]; } /* need to create copy of centers because kmeans function overwrites data */ struct stats * centers_copy = calloc(config.k, sizeof(struct stats)); for(int i = 0; i < config.k; i++) { centers_copy[i] = stats[i]; config.centers[i] = &centers_copy[i]; } /* figure out which stat belongs in which cluster */ kmeans(&config); /* array of clusters */ struct stats *** clustered = calloc(config.k, sizeof(struct stats **)); for(int i = 0; i < config.k; i++) { #if defined(DEBUG) || defined(CLUSTERS) fprintf(stderr, "Cluster %d:", i); #endif /* count how many items are in cluster i */ size_t count = 0; for(int j = 0; j < config.num_objs; j++) { if (config.clusters[j] == i) { count++; } } clustered[i] = calloc(count + 1, sizeof(struct stats *)); /* put the items into the cluster */ size_t k = 0; for(int j = 0; j < config.num_objs; j++) { if (config.clusters[j] == i) { clustered[i][k] = config.objs[j]; #if defined(DEBUG) || defined(CLUSTERS) fprintf(stderr, " %d", clustered[i][k]->node); #endif k++; } } #if defined(DEBUG) || defined(CLUSTERS) fprintf(stderr, "\n"); #endif } /* sort clusters by average of averages */ qsort(clustered, config.k, sizeof(struct stats **), sort_by_avg_of_avg); /* print */ for(int i = 0; i < config.k; i++) { fprintf(stdout, "%s:", argv[3 + i]); for(struct stats ** cluster = clustered[i]; *cluster; cluster++) { fprintf(stdout, " %d", (*cluster)->node); } fprintf(stdout, "\n"); free(clustered[i]); } /* cleanup */ free(clustered); free(centers_copy); free(config.objs); free(config.clusters); 
free(config.centers); free(stats); free(numa_nodes); return 0; }
omp_whereami.c
/* Program omp_whereami reports the mask for each OMP thread, and works for nsec seconds (10). This allows one to inspect occupation through utilities like top (e.g. execute top, then hit the 1 key). Uses maskeraid utilities github.com/TACC/maskeraid map_to_cpuid(cpu_id): will set current thread to cpu_id omp_report_mask(): reports masks of the threads load_cpu_nsec(nsec): load the cpu for nsec (default 10) */ /* omp_whereami.c is a driver 1.) Get line arguments (optional): help or number of seconds for load 2.) Start OpenMP parallel region omp_report_mask() reports masks for each thread 3.) Set a work load on each thread 4.) Finish parallel region Kent Milfeld 12/16/15 Added cmd_line argument extraction. Kent Milfeld 2016/07/13 */ #include <stdio.h> #include <omp.h> #include "opts.h" void load_cpu_nsec(int nsec); void omp_report_mask(); int map_to_cpuid( int icore); int main(int argc, char *argv[]){ int nthrds, thrd, cpuid; //Thread info int nsec = 10; // Load, default time int ierr; // Error number // cmdln_get_nsec_or_help( &nsec, argc, argv); //optional, get nsec from cmd line Maskopts opts(argc,argv); #pragma omp parallel private(thrd,ierr) { thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); // cpuid = thrd; // set cpuid to thread number (thrd) // ierr = map_to_cpuid( cpuid ); // set your own affinity here omp_report_mask(); // Call mask reporter load_cpu_nsec( nsec ); // Load up rank process so user can watch top. } }
convolution_3x3_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack4to1_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // 
interleave // src = 64-inch-outch // dst = 4a-inch/4a-64-outch; #if __aarch64__ kernel_tm_pack4.create(8 * inch / 4, 64, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)4u * 4, 4); #else kernel_tm_pack4.create(4 * inch / 4, 64, outch / 4 + outch % 4, (size_t)4u * 4, 4); #endif int p = 0; #if __aarch64__ for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack4.channel(p / 8); for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int q = 0; q + 3 < inch; q += 4) { const float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); const float* k10 = k1.row(q); const float* k11 = k1.row(q + 1); const float* k12 = k1.row(q + 2); const float* k13 = k1.row(q + 3); const float* k20 = k2.row(q); const float* k21 = k2.row(q + 1); const float* k22 = k2.row(q + 2); const float* k23 = k2.row(q + 3); const float* k30 = k3.row(q); const float* k31 = k3.row(q + 1); const float* k32 = k3.row(q + 2); const float* k33 = k3.row(q + 3); const float* k40 = k4.row(q); const float* k41 = k4.row(q + 1); const float* k42 = k4.row(q + 2); const float* k43 = k4.row(q + 3); const float* k50 = k5.row(q); const float* k51 = k5.row(q + 1); const float* k52 = k5.row(q + 2); const float* k53 = k5.row(q + 3); const float* k60 = k6.row(q); const float* k61 = k6.row(q + 1); const float* k62 = k6.row(q + 2); const float* k63 = k6.row(q + 3); const float* k70 = k7.row(q); const float* k71 = k7.row(q + 1); const float* k72 = k7.row(q + 2); const float* k73 = k7.row(q + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = 
k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4); #else Mat g0 = kernel_tm_pack4.channel(p / 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int q = 0; q + 3 < inch; q += 4) { const float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); const float* k10 = k1.row(q); const float* k11 = k1.row(q + 1); const float* k12 = k1.row(q + 2); const float* k13 = k1.row(q + 3); const float* k20 = k2.row(q); const float* k21 = k2.row(q + 1); const float* k22 = k2.row(q + 2); const float* k23 = k2.row(q + 3); const float* k30 = k3.row(q); const float* k31 = k3.row(q + 1); const float* k32 = k3.row(q + 2); const float* k33 = k3.row(q + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; g00[15] = k33[k]; g00 += 16; } } } for (; p < outch; p++) { const Mat k0 = kernel_tm.channel(p); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4 + p % 4); #else Mat g0 = kernel_tm_pack4.channel(p / 4 + p % 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int q = 0; q + 3 < inch; q += 4) { const 
float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); g00[0] = k00[k]; g00[1] = k01[k]; g00[2] = k02[k]; g00[3] = k03[k]; g00 += 4; } } } } static void conv3x3s1_winograd64_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = 
(r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, 
_tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); 
float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + tiles % 12 % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) 
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n" "sub %0, %0, #128 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v19.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = 
tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "sub %0, %0, #64 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d16-d19}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d20-d23}, [%0 :128] \n" "sub %0, %0, #96 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d16-d17}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" "vst1.f32 {d22-d23}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128] \n" "sub %0, %0, #32 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); float* output4_tm = top_blob_tm.channel(p + 4); float* output5_tm = top_blob_tm.channel(p + 5); float* output6_tm = top_blob_tm.channel(p + 6); float* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 
{v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v23.4s, v0.4s, v5.s[1] \n" "fmla v26.4s, v0.4s, v5.s[2] \n" "fmla v29.4s, v0.4s, v5.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v21.4s, v1.4s, v5.s[0] \n" "fmla v24.4s, v1.4s, v5.s[1] \n" "fmla v27.4s, v1.4s, v5.s[2] \n" "fmla v30.4s, v1.4s, v5.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "fmla v22.4s, v2.4s, v5.s[0] \n" "fmla v25.4s, v2.4s, v5.s[1] \n" "fmla v28.4s, v2.4s, v5.s[2] \n" "fmla v31.4s, v2.4s, v5.s[3] \n" "fmla v8.4s, v3.4s, v6.s[0] \n" "fmla v11.4s, v3.4s, v6.s[1] \n" "fmla v14.4s, v3.4s, v6.s[2] \n" "fmla v17.4s, v3.4s, v6.s[3] \n" "fmla v20.4s, v3.4s, v7.s[0] \n" "fmla v23.4s, v3.4s, v7.s[1] \n" "fmla v26.4s, v3.4s, v7.s[2] \n" "fmla v29.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v9.4s, v0.4s, v6.s[0] \n" "fmla v12.4s, v0.4s, v6.s[1] \n" "fmla v15.4s, v0.4s, v6.s[2] \n" "fmla v18.4s, v0.4s, v6.s[3] \n" "fmla v21.4s, v0.4s, v7.s[0] \n" "fmla v24.4s, v0.4s, v7.s[1] \n" "fmla v27.4s, v0.4s, v7.s[2] \n" "fmla v30.4s, v0.4s, v7.s[3] \n" "fmla v10.4s, v1.4s, v6.s[0] \n" "fmla v13.4s, v1.4s, v6.s[1] \n" "fmla v16.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v22.4s, v1.4s, v7.s[0] \n" "fmla v25.4s, v1.4s, v7.s[1] \n" "fmla v28.4s, v1.4s, v7.s[2] \n" "fmla v31.4s, v1.4s, v7.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "fmla v8.4s, v2.4s, v4.s[0] \n" "fmla v11.4s, v2.4s, v4.s[1] \n" "fmla v14.4s, v2.4s, v4.s[2] \n" "fmla 
v17.4s, v2.4s, v4.s[3] \n" "fmla v20.4s, v2.4s, v5.s[0] \n" "fmla v23.4s, v2.4s, v5.s[1] \n" "fmla v26.4s, v2.4s, v5.s[2] \n" "fmla v29.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v4.s[0] \n" "fmla v12.4s, v3.4s, v4.s[1] \n" "fmla v15.4s, v3.4s, v4.s[2] \n" "fmla v18.4s, v3.4s, v4.s[3] \n" "fmla v21.4s, v3.4s, v5.s[0] \n" "fmla v24.4s, v3.4s, v5.s[1] \n" "fmla v27.4s, v3.4s, v5.s[2] \n" "fmla v30.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v10.4s, v0.4s, v4.s[0] \n" "fmla v13.4s, v0.4s, v4.s[1] \n" "fmla v16.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v22.4s, v0.4s, v5.s[0] \n" "fmla v25.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v31.4s, v0.4s, v5.s[3] \n" "fmla v8.4s, v1.4s, v6.s[0] \n" "fmla v11.4s, v1.4s, v6.s[1] \n" "fmla v14.4s, v1.4s, v6.s[2] \n" "fmla v17.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v23.4s, v1.4s, v7.s[1] \n" "fmla v26.4s, v1.4s, v7.s[2] \n" "fmla v29.4s, v1.4s, v7.s[3] \n" "fmla v9.4s, v2.4s, v6.s[0] \n" "fmla v12.4s, v2.4s, v6.s[1] \n" "fmla v15.4s, v2.4s, v6.s[2] \n" "fmla v18.4s, v2.4s, v6.s[3] \n" "fmla v21.4s, v2.4s, v7.s[0] \n" "fmla v24.4s, v2.4s, v7.s[1] \n" "fmla v27.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v10.4s, v3.4s, v6.s[0] \n" "fmla v13.4s, v3.4s, v6.s[1] \n" "fmla v16.4s, v3.4s, v6.s[2] \n" "fmla v19.4s, v3.4s, v6.s[3] \n" "fmla v22.4s, v3.4s, v7.s[0] \n" "fmla v25.4s, v3.4s, v7.s[1] \n" "fmla v28.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" "st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n" "st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n" "st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n" "st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 
"=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v18.4s, v0.4s, v4.s[1] \n" "fmla v20.4s, v0.4s, v4.s[2] \n" "fmla v22.4s, v0.4s, v4.s[3] \n" "fmla v24.4s, v0.4s, v5.s[0] \n" "fmla v26.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v30.4s, v0.4s, v5.s[3] \n" "fmla v17.4s, v1.4s, v4.s[0] \n" "fmla v19.4s, v1.4s, v4.s[1] \n" "fmla v21.4s, v1.4s, v4.s[2] \n" "fmla v23.4s, v1.4s, v4.s[3] \n" "fmla v25.4s, v1.4s, v5.s[0] \n" "fmla v27.4s, v1.4s, v5.s[1] \n" "fmla v29.4s, v1.4s, v5.s[2] \n" 
"fmla v31.4s, v1.4s, v5.s[3] \n" "fmla v16.4s, v2.4s, v6.s[0] \n" "fmla v18.4s, v2.4s, v6.s[1] \n" "fmla v20.4s, v2.4s, v6.s[2] \n" "fmla v22.4s, v2.4s, v6.s[3] \n" "fmla v24.4s, v2.4s, v7.s[0] \n" "fmla v26.4s, v2.4s, v7.s[1] \n" "fmla v28.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v17.4s, v3.4s, v6.s[0] \n" "fmla v19.4s, v3.4s, v6.s[1] \n" "fmla v21.4s, v3.4s, v6.s[2] \n" "fmla v23.4s, v3.4s, v6.s[3] \n" "fmla v25.4s, v3.4s, v7.s[0] \n" "fmla v27.4s, v3.4s, v7.s[1] \n" "fmla v29.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v12.4s, v8.s[0] \n" "fmla v18.4s, v12.4s, v8.s[1] \n" "fmla v20.4s, v12.4s, v8.s[2] \n" "fmla v22.4s, v12.4s, v8.s[3] \n" "fmla v24.4s, v12.4s, v9.s[0] \n" "fmla v26.4s, v12.4s, v9.s[1] \n" "fmla v28.4s, v12.4s, v9.s[2] \n" "fmla v30.4s, v12.4s, v9.s[3] \n" "fmla v17.4s, v13.4s, v8.s[0] \n" "fmla v19.4s, v13.4s, v8.s[1] \n" "fmla v21.4s, v13.4s, v8.s[2] \n" "fmla v23.4s, v13.4s, v8.s[3] \n" "fmla v25.4s, v13.4s, v9.s[0] \n" "fmla v27.4s, v13.4s, v9.s[1] \n" "fmla v29.4s, v13.4s, v9.s[2] \n" "fmla v31.4s, v13.4s, v9.s[3] \n" "fmla v16.4s, v14.4s, v10.s[0] \n" "fmla v18.4s, v14.4s, v10.s[1] \n" "fmla v20.4s, v14.4s, v10.s[2] \n" "fmla v22.4s, v14.4s, v10.s[3] \n" "fmla v24.4s, v14.4s, v11.s[0] \n" "fmla v26.4s, v14.4s, v11.s[1] \n" "fmla v28.4s, v14.4s, v11.s[2] \n" "fmla v30.4s, v14.4s, v11.s[3] \n" "fmla v17.4s, v15.4s, v10.s[0] \n" "fmla v19.4s, v15.4s, v10.s[1] \n" "fmla v21.4s, v15.4s, v10.s[2] \n" "fmla v23.4s, v15.4s, v10.s[3] \n" "fmla v25.4s, v15.4s, v11.s[0] \n" "fmla v27.4s, v15.4s, v11.s[1] \n" "fmla v29.4s, v15.4s, v11.s[2] \n" "fmla v31.4s, v15.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" "st1 {v20.4s, v21.4s}, [%3], #32 \n" "st1 {v22.4s, v23.4s}, [%4], #32 \n" "st1 
{v24.4s, v25.4s}, [%5], #32 \n" "st1 {v26.4s, v27.4s}, [%6], #32 \n" "st1 {v28.4s, v29.4s}, [%7], #32 \n" "st1 {v30.4s, v31.4s}, [%8], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v17.4s, v0.4s, v4.s[1] \n" "fmla v18.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v21.4s, v0.4s, v5.s[1] \n" "fmla v22.4s, v0.4s, v5.s[2] \n" "fmla v23.4s, v0.4s, v5.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v1.4s, v6.s[0] \n" "fmla v17.4s, v1.4s, v6.s[1] \n" "fmla v18.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v21.4s, v1.4s, v7.s[1] \n" 
"fmla v22.4s, v1.4s, v7.s[2] \n" "fmla v23.4s, v1.4s, v7.s[3] \n" "fmla v16.4s, v2.4s, v8.s[0] \n" "fmla v17.4s, v2.4s, v8.s[1] \n" "fmla v18.4s, v2.4s, v8.s[2] \n" "fmla v19.4s, v2.4s, v8.s[3] \n" "fmla v20.4s, v2.4s, v9.s[0] \n" "fmla v21.4s, v2.4s, v9.s[1] \n" "fmla v22.4s, v2.4s, v9.s[2] \n" "fmla v23.4s, v2.4s, v9.s[3] \n" "fmla v16.4s, v3.4s, v10.s[0] \n" "fmla v17.4s, v3.4s, v10.s[1] \n" "fmla v18.4s, v3.4s, v10.s[2] \n" "fmla v19.4s, v3.4s, v10.s[3] \n" "fmla v20.4s, v3.4s, v11.s[0] \n" "fmla v21.4s, v3.4s, v11.s[1] \n" "fmla v22.4s, v3.4s, v11.s[2] \n" "fmla v23.4s, v3.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" "st1 {v18.4s}, [%3], #16 \n" "st1 {v19.4s}, [%4], #16 \n" "st1 {v20.4s}, [%5], #16 \n" "st1 {v21.4s}, [%6], #16 \n" "st1 {v22.4s}, [%7], #16 \n" "st1 {v23.4s}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4s}, [%9], #16 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v5.4s, v0.s[0] \n" "fmla v18.4s, 
v6.4s, v0.s[1] \n" "fmla v19.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v8.4s, v0.s[2] \n" "fmla v17.4s, v9.4s, v0.s[2] \n" "fmla v18.4s, v10.4s, v0.s[3] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.s}[0], [%1], #4 \n" "st1 {v16.s}[1], [%2], #4 \n" "st1 {v16.s}[2], [%3], #4 \n" "st1 {v16.s}[3], [%4], #4 \n" "st1 {v17.s}[0], [%5], #4 \n" "st1 {v17.s}[1], [%6], #4 \n" "st1 {v17.s}[2], [%7], #4 \n" "st1 {v17.s}[3], [%8], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); } } } remain_outch_start += nn_outch << 3; nn_outch = (outch - remain_outch_start) >> 2; #else // __aarch64__ nn_outch = outch >> 2; #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); #if __aarch64__ const Mat kernel01_tm = kernel_tm.channel(p / 8 + (p % 8) / 4); #else const Mat kernel01_tm = kernel_tm.channel(p / 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, 
v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n" "fmla v8.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v14.4s, v3.4s, v5.s[2] \n" "fmla v17.4s, v3.4s, v5.s[3] \n" "fmla v9.4s, v20.4s, v5.s[0] \n" "fmla v12.4s, v20.4s, v5.s[1] \n" "fmla v15.4s, v20.4s, v5.s[2] \n" "fmla v18.4s, v20.4s, v5.s[3] \n" "fmla v10.4s, v21.4s, v5.s[0] \n" "fmla v13.4s, v21.4s, v5.s[1] \n" "fmla v16.4s, v21.4s, v5.s[2] \n" "fmla v19.4s, v21.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n" "fmla v8.4s, v22.4s, v6.s[0] \n" "fmla v11.4s, v22.4s, v6.s[1] \n" "fmla v14.4s, v22.4s, v6.s[2] \n" "fmla v17.4s, v22.4s, v6.s[3] \n" "fmla v9.4s, v23.4s, v6.s[0] \n" "fmla v12.4s, v23.4s, v6.s[1] \n" "fmla v15.4s, v23.4s, v6.s[2] \n" "fmla v18.4s, v23.4s, v6.s[3] \n" "fmla v10.4s, v24.4s, v6.s[0] \n" "fmla v13.4s, v24.4s, v6.s[1] \n" "fmla v16.4s, v24.4s, v6.s[2] \n" "fmla v19.4s, v24.4s, v6.s[3] \n" "fmla v8.4s, v25.4s, v7.s[0] \n" "fmla 
v11.4s, v25.4s, v7.s[1] \n" "fmla v14.4s, v25.4s, v7.s[2] \n" "fmla v17.4s, v25.4s, v7.s[3] \n" "fmla v9.4s, v26.4s, v7.s[0] \n" "fmla v12.4s, v26.4s, v7.s[1] \n" "fmla v15.4s, v26.4s, v7.s[2] \n" "fmla v18.4s, v26.4s, v7.s[3] \n" "fmla v10.4s, v27.4s, v7.s[0] \n" "fmla v13.4s, v27.4s, v7.s[1] \n" "fmla v16.4s, v27.4s, v7.s[2] \n" "fmla v19.4s, v27.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __aarch64__ for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v10.4s, v0.4s, v4.s[1] \n" "fmla v12.4s, v0.4s, v4.s[2] \n" "fmla v14.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v11.4s, v1.4s, v4.s[1] \n" "fmla v13.4s, v1.4s, v4.s[2] \n" "fmla v15.4s, v1.4s, 
v4.s[3] \n" "fmla v8.4s, v2.4s, v5.s[0] \n" "fmla v10.4s, v2.4s, v5.s[1] \n" "fmla v12.4s, v2.4s, v5.s[2] \n" "fmla v14.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v13.4s, v3.4s, v5.s[2] \n" "fmla v15.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n" "fmla v8.4s, v16.4s, v6.s[0] \n" "fmla v10.4s, v16.4s, v6.s[1] \n" "fmla v12.4s, v16.4s, v6.s[2] \n" "fmla v14.4s, v16.4s, v6.s[3] \n" "fmla v9.4s, v17.4s, v6.s[0] \n" "fmla v11.4s, v17.4s, v6.s[1] \n" "fmla v13.4s, v17.4s, v6.s[2] \n" "fmla v15.4s, v17.4s, v6.s[3] \n" "fmla v8.4s, v18.4s, v7.s[0] \n" "fmla v10.4s, v18.4s, v7.s[1] \n" "fmla v12.4s, v18.4s, v7.s[2] \n" "fmla v14.4s, v18.4s, v7.s[3] \n" "fmla v9.4s, v19.4s, v7.s[0] \n" "fmla v11.4s, v19.4s, v7.s[1] \n" "fmla v13.4s, v19.4s, v7.s[2] \n" "fmla v15.4s, v19.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q10, q0, d8[1] \n" "vmla.f32 q12, q0, d9[0] \n" "vmla.f32 q14, q0, d9[1] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q11, q1, d8[1] \n" "vmla.f32 q13, q1, d9[0] \n" "vmla.f32 q15, q1, d9[1] \n" "vmla.f32 q8, q2, d10[0] \n" "vmla.f32 
q10, q2, d10[1] \n" "vmla.f32 q12, q2, d11[0] \n" "vmla.f32 q14, q2, d11[1] \n" "vmla.f32 q9, q3, d10[0] \n" "vmla.f32 q11, q3, d10[1] \n" "vmla.f32 q13, q3, d11[0] \n" "vmla.f32 q15, q3, d11[1] \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "vmla.f32 q8, q0, d12[0] \n" "vmla.f32 q10, q0, d12[1] \n" "vmla.f32 q12, q0, d13[0] \n" "vmla.f32 q14, q0, d13[1] \n" "vmla.f32 q9, q1, d12[0] \n" "vmla.f32 q11, q1, d12[1] \n" "vmla.f32 q13, q1, d13[0] \n" "vmla.f32 q15, q1, d13[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d14[0] \n" "vmla.f32 q10, q2, d14[1] \n" "vmla.f32 q12, q2, d15[0] \n" "vmla.f32 q14, q2, d15[1] \n" "vmla.f32 q9, q3, d14[0] \n" "vmla.f32 q11, q3, d14[1] \n" "vmla.f32 q13, q3, d15[0] \n" "vmla.f32 q15, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1]! \n" "vst1.f32 {d20-d23}, [%2]! \n" "vst1.f32 {d24-d27}, [%3]! \n" "vst1.f32 {d28-d31}, [%4]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v0.4s, v4.s[1] \n" "fmla v10.4s, v0.4s, v4.s[2] \n" "fmla v11.4s, v0.4s, v4.s[3] 
\n" "fmla v8.4s, v1.4s, v5.s[0] \n" "fmla v9.4s, v1.4s, v5.s[1] \n" "fmla v10.4s, v1.4s, v5.s[2] \n" "fmla v11.4s, v1.4s, v5.s[3] \n" "fmla v8.4s, v2.4s, v6.s[0] \n" "fmla v9.4s, v2.4s, v6.s[1] \n" "fmla v10.4s, v2.4s, v6.s[2] \n" "fmla v11.4s, v2.4s, v6.s[3] \n" "fmla v8.4s, v3.4s, v7.s[0] \n" "fmla v9.4s, v3.4s, v7.s[1] \n" "fmla v10.4s, v3.4s, v7.s[2] \n" "fmla v11.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s}, [%1], #16 \n" "st1 {v9.4s}, [%2], #16 \n" "st1 {v10.4s}, [%3], #16 \n" "st1 {v11.4s}, [%4], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q0, d8[1] \n" "vmla.f32 q10, q0, d9[0] \n" "vmla.f32 q11, q0, d9[1] \n" "vmla.f32 q8, q1, d10[0] \n" "vmla.f32 q9, q1, d10[1] \n" "vmla.f32 q10, q1, d11[0] \n" "vmla.f32 q11, q1, d11[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d12[0] \n" "vmla.f32 q9, q2, d12[1] \n" "vmla.f32 q10, q2, d13[0] \n" "vmla.f32 q11, q2, d13[1] \n" "vmla.f32 q8, q3, d14[0] \n" "vmla.f32 q9, q3, d14[1] \n" "vmla.f32 q10, q3, d15[0] \n" "vmla.f32 q11, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1]! \n" "vst1.f32 {d18-d19}, [%2]! \n" "vst1.f32 {d20-d21}, [%3]! \n" "vst1.f32 {d22-d23}, [%4]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.s}[0], [%1], #4 \n" "st1 {v8.s}[1], [%2], #4 \n" "st1 {v8.s}[2], [%3], #4 \n" "st1 {v8.s}[3], [%4], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! 
\n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q7, d1[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16[0]}, [%1]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vst1.f32 {d17[0]}, [%3]! \n" "vst1.f32 {d17[1]}, [%4]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v5.4s, v3.4s, v4.s[1] \n" "fmla v6.4s, v12.4s, v4.s[1] \n" "fmla v7.4s, v13.4s, v4.s[1] \n" "prfm 
pldl1keep, [%2, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n" "fmla v8.4s, v14.4s, v4.s[2] \n" "fmla v9.4s, v15.4s, v4.s[2] \n" "fmla v10.4s, v16.4s, v4.s[2] \n" "fmla v5.4s, v17.4s, v4.s[3] \n" "fmla v6.4s, v18.4s, v4.s[3] \n" "fmla v7.4s, v19.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v5.4s \n" "fadd v9.4s, v9.4s, v6.4s \n" "fadd v10.4s, v10.4s, v7.4s \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[1] \n" "fmla v11.4s, v3.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v8.4s, v12.4s, v4.s[2] \n" "fmla v9.4s, v13.4s, v4.s[2] \n" "fmla v10.4s, v14.4s, v4.s[3] \n" "fmla v11.4s, v15.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 
\n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3]! \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q10, q2, d8[1] \n" "vmla.f32 q11, q3, d8[1] \n" "pld [%2, #512] \n" "vldm %2!, {d24-d31} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q12, d9[0] \n" "vmla.f32 q9, q13, d9[0] \n" "vmla.f32 q10, q14, d9[1] \n" "vmla.f32 q11, q15, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vst1.f32 {d16-d19}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, 
#128] \n" "vld1.f32 {d8-d9}, [%3]! \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16-d17}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel0_tm.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); for (int q = 0; q < inch; q++) { float32x4_t _r0 = vld1q_f32(r0); float32x4_t _k0 = vld1q_f32(kptr); _sum0 = vmlaq_f32(_sum0, _r0, _k0); kptr += 4; r0 += 4; } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss2 = vpadd_f32(_ss, _ss); float sum0 = vget_lane_f32(_ss2, 0); #endif output0_tm[0] = sum0; output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 
8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; // float32x2_t _bias0 = vdup_n_f32(bias0); float tmp[6][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 1; const float* output0_tm_1 = output0_tm_0 + tiles * 1; const float* output0_tm_2 = output0_tm_0 + tiles * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 7; // TODO neon optimize for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a = output0_tm_1[0] - output0_tm_2[0]; float tmp024b = output0_tm_3[0] + output0_tm_4[0]; float tmp135b = output0_tm_3[0] - output0_tm_4[0]; float tmp024c = output0_tm_5[0] + output0_tm_6[0]; float tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 8; output0_tm_1 += tiles * 8; output0_tm_2 += tiles * 8; output0_tm_3 += tiles * 8; output0_tm_4 += tiles * 8; output0_tm_5 += tiles * 8; output0_tm_6 += tiles * 8; output0_tm_7 += tiles * 
8; } float* output0 = out0.row(i * 6) + j * 6; for (int m = 0; m < 6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? 
bias[p + 1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p + 1); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00_0 = vld1q_f32(k0); float32x4_t _k01_0 = vld1q_f32(k0 + 4); float32x4_t _k02_0 = vld1q_f32(k0 + 8); float32x4_t _k10_0 = vld1q_f32(k0 + 12); float32x4_t _k11_0 = vld1q_f32(k0 + 16); float32x4_t _k12_0 = vld1q_f32(k0 + 20); float32x4_t _k20_0 = vld1q_f32(k0 + 24); float32x4_t _k21_0 = vld1q_f32(k0 + 28); float32x4_t _k22_0 = vld1q_f32(k0 + 32); float32x4_t _k00_1 = vld1q_f32(k1); float32x4_t _k01_1 = vld1q_f32(k1 + 4); float32x4_t _k02_1 = vld1q_f32(k1 + 8); float32x4_t _k10_1 = vld1q_f32(k1 + 12); float32x4_t _k11_1 = vld1q_f32(k1 + 16); float32x4_t _k12_1 = vld1q_f32(k1 + 20); float32x4_t _k20_1 = vld1q_f32(k1 + 24); float32x4_t _k21_1 = vld1q_f32(k1 + 28); float32x4_t _k22_1 = vld1q_f32(k1 + 32); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r00 r01 r02 r03 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %10.4s, v1.4s \n" "fmul v19.4s, %19.4s, v1.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4s, v5.4s}, [%2] \n" // r04 r05 "fmul v6.4s, %10.4s, v2.4s \n" "fmul v7.4s, %19.4s, v2.4s \n" "fmul v8.4s, %10.4s, v3.4s \n" "fmul v9.4s, %19.4s, v3.4s \n" "fmla v16.4s, %11.4s, v1.4s \n" "fmla v17.4s, %20.4s, v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla v19.4s, %20.4s, v2.4s \n" "fmla v6.4s, %11.4s, v3.4s \n" "fmla v7.4s, %20.4s, v3.4s \n" "fmla v8.4s, %11.4s, v4.4s \n" "fmla v9.4s, %20.4s, v4.4s \n" "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %21.4s, v3.4s \n" "prfm pldl1keep, [%3, #512] \n" 
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r10 r11 r12 r12 "fmla v6.4s, %12.4s, v4.4s \n" "fmla v7.4s, %21.4s, v4.4s \n" "fmla v8.4s, %12.4s, v5.4s \n" "fmla v9.4s, %21.4s, v5.4s \n" "fmla v16.4s, %13.4s, v0.4s \n" "fmla v17.4s, %22.4s, v0.4s \n" "fmla v18.4s, %13.4s, v1.4s \n" "fmla v19.4s, %22.4s, v1.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4s, v5.4s}, [%3] \n" // r14 r15 "fmla v6.4s, %13.4s, v2.4s \n" "fmla v7.4s, %22.4s, v2.4s \n" "fmla v8.4s, %13.4s, v3.4s \n" "fmla v9.4s, %22.4s, v3.4s \n" "fmla v16.4s, %14.4s, v1.4s \n" "fmla v17.4s, %23.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %23.4s, v2.4s \n" "fmla v6.4s, %14.4s, v3.4s \n" "fmla v7.4s, %23.4s, v3.4s \n" "fmla v8.4s, %14.4s, v4.4s \n" "fmla v9.4s, %23.4s, v4.4s \n" "fmla v16.4s, %15.4s, v2.4s \n" "fmla v17.4s, %24.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %24.4s, v3.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n" // r20 r21 r22 r22 "fmla v6.4s, %15.4s, v4.4s \n" "fmla v7.4s, %24.4s, v4.4s \n" "fmla v8.4s, %15.4s, v5.4s \n" "fmla v9.4s, %24.4s, v5.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %16.4s, v1.4s \n" "fmla v19.4s, %25.4s, v1.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4] \n" // r24 r25 "fmla v6.4s, %16.4s, v2.4s \n" "fmla v7.4s, %25.4s, v2.4s \n" "fmla v8.4s, %16.4s, v3.4s \n" "fmla v9.4s, %25.4s, v3.4s \n" "fmla v16.4s, %17.4s, v1.4s \n" "fmla v17.4s, %26.4s, v1.4s \n" "fmla v18.4s, %17.4s, v2.4s \n" "fmla v19.4s, %26.4s, v2.4s \n" "fmla v6.4s, %17.4s, v3.4s \n" "fmla v7.4s, %26.4s, v3.4s \n" "fmla v8.4s, %17.4s, v4.4s \n" "fmla v9.4s, %26.4s, v4.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "fmla v18.4s, %18.4s, v3.4s \n" "fmla v19.4s, %27.4s, v3.4s \n" "fmla v6.4s, %18.4s, v4.4s \n" "fmla v7.4s, %27.4s, v4.4s \n" "fmla v8.4s, %18.4s, v5.4s \n" "fmla v9.4s, %27.4s, v5.4s \n" "ld1 {v0.4s}, [%0] \n" // sum00 sum01 sum02 sum03 
"ld1 {v1.4s}, [%1] \n" // sum10 sum11 sum12 sum13 "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "faddp v18.4s, v18.4s, v18.4s \n" "faddp v19.4s, v19.4s, v19.4s \n" "faddp v6.4s, v6.4s, v6.4s \n" "faddp v7.4s, v7.4s, v7.4s \n" "faddp v8.4s, v8.4s, v8.4s \n" "faddp v9.4s, v9.4s, v9.4s \n" "faddp v16.2s, v16.2s, v18.2s \n" "faddp v17.2s, v17.2s, v19.2s \n" "faddp v6.2s, v6.2s, v8.2s \n" "faddp v7.2s, v7.2s, v9.2s \n" "trn1 v16.2d, v16.2d, v6.2d \n" "trn1 v17.2d, v17.2d, v7.2d \n" "fadd v0.4s, v0.4s, v16.4s \n" "fadd v1.4s, v1.4s, v17.4s \n" "st1 {v0.4s}, [%0], #16 \n" "st1 {v1.4s}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2] \n" // r00 r01 r02 r03 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %10.4s, v1.4s \n" "fmul v19.4s, %19.4s, v1.4s \n" "fmla v16.4s, %11.4s, v1.4s \n" "fmla v17.4s, %20.4s, v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla v19.4s, %20.4s, v2.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3] \n" // r10 r11 r12 r12 "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %21.4s, v3.4s \n" "fmla v16.4s, %13.4s, v4.4s \n" "fmla v17.4s, %22.4s, v4.4s \n" "fmla v18.4s, %13.4s, v5.4s \n" "fmla v19.4s, %22.4s, v5.4s \n" "fmla v16.4s, 
%14.4s, v5.4s \n" "fmla v17.4s, %23.4s, v5.4s \n" "fmla v18.4s, %14.4s, v6.4s \n" "fmla v19.4s, %23.4s, v6.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4] \n" // r20 r21 r22 r22 "fmla v16.4s, %15.4s, v6.4s \n" "fmla v17.4s, %24.4s, v6.4s \n" "fmla v18.4s, %15.4s, v7.4s \n" "fmla v19.4s, %24.4s, v7.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %16.4s, v1.4s \n" "fmla v19.4s, %25.4s, v1.4s \n" "fmla v16.4s, %17.4s, v1.4s \n" "fmla v17.4s, %26.4s, v1.4s \n" "fmla v18.4s, %17.4s, v2.4s \n" "fmla v19.4s, %26.4s, v2.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "fmla v18.4s, %18.4s, v3.4s \n" "fmla v19.4s, %27.4s, v3.4s \n" "ld1 {v4.2s}, [%0] \n" // sum00 sum01 "ld1 {v5.2s}, [%1] \n" // sum10 sum11 "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "faddp v18.4s, v18.4s, v18.4s \n" "faddp v19.4s, v19.4s, v19.4s \n" "add %2, %2, #32 \n" "faddp v16.2s, v16.2s, v18.2s \n" "faddp v17.2s, v17.2s, v19.2s \n" "add %3, %3, #32 \n" "fadd v4.2s, v4.2s, v16.2s \n" "fadd v5.2s, v5.2s, v17.2s \n" "add %4, %4, #32 \n" "st1 {v4.2s}, [%0], #8 \n" "st1 {v5.2s}, [%1], #8 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%2, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%2] \n" // r00 r01 r02 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %11.4s, v1.4s \n" 
"fmul v19.4s, %20.4s, v1.4s \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%3] \n" // r10 r11 r12 "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %13.4s, v3.4s \n" "fmla v19.4s, %22.4s, v3.4s \n" "fmla v16.4s, %14.4s, v4.4s \n" "fmla v17.4s, %23.4s, v4.4s \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%4] \n" // r20 r21 r22 "fmla v18.4s, %15.4s, v5.4s \n" "fmla v19.4s, %24.4s, v5.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %17.4s, v1.4s \n" "fmla v19.4s, %26.4s, v1.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "ld1 {v3.s}[0], [%0] \n" // sum00 "ld1 {v4.s}[0], [%1] \n" // sum10 "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "add %2, %2, #16 \n" "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "add %3, %3, #16 \n" "faddp v16.2s, v16.2s, v16.2s \n" "faddp v17.2s, v17.2s, v17.2s \n" "add %4, %4, #16 \n" "fadd v3.2s, v3.2s, v16.2s \n" "fadd v4.2s, v4.2s, v17.2s \n" "st1 {v3.s}[0], [%0], #4 \n" "st1 {v4.s}[0], [%1], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19"); } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } k0 += 9 * 4; k1 += 9 * 4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out0.fill(bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k10 = vld1q_f32(k0 + 12); float32x4_t _k11 = vld1q_f32(k0 + 16); float32x4_t _k12 = vld1q_f32(k0 + 20); float32x4_t _k20 = vld1q_f32(k0 + 24); float32x4_t _k21 = vld1q_f32(k0 + 28); float32x4_t _k22 = vld1q_f32(k0 + 32); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %8.4s, v2.4s \n" "fmul v19.4s, %8.4s, v3.4s \n" "fmul v20.4s, %8.4s, v4.4s \n" "fmul v21.4s, %8.4s, v5.4s \n" "fmul v22.4s, %8.4s, v6.4s \n" "fmul v23.4s, %8.4s, v7.4s \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" // r08 r09 "fmla v16.4s, %9.4s, v1.4s \n" "fmla v17.4s, %9.4s, v2.4s \n" "fmla v18.4s, %9.4s, v3.4s \n" "fmla v19.4s, %9.4s, v4.4s \n" "fmla v20.4s, %9.4s, v5.4s \n" "fmla v21.4s, %9.4s, v6.4s \n" "fmla v22.4s, %9.4s, v7.4s \n" "fmla v23.4s, %9.4s, v8.4s \n" "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %10.4s, v4.4s \n" "fmla v19.4s, %10.4s, v5.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v20.4s, %10.4s, v6.4s \n" "fmla v21.4s, %10.4s, v7.4s \n" "fmla v22.4s, %10.4s, v8.4s \n" "fmla v23.4s, %10.4s, v9.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v16.4s, %11.4s, v0.4s \n" "fmla v17.4s, %11.4s, 
v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla v19.4s, %11.4s, v3.4s \n" "fmla v20.4s, %11.4s, v4.4s \n" "fmla v21.4s, %11.4s, v5.4s \n" "fmla v22.4s, %11.4s, v6.4s \n" "fmla v23.4s, %11.4s, v7.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r18 r19 "fmla v16.4s, %12.4s, v1.4s \n" "fmla v17.4s, %12.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %12.4s, v4.4s \n" "fmla v20.4s, %12.4s, v5.4s \n" "fmla v21.4s, %12.4s, v6.4s \n" "fmla v22.4s, %12.4s, v7.4s \n" "fmla v23.4s, %12.4s, v8.4s \n" "fmla v16.4s, %13.4s, v2.4s \n" "fmla v17.4s, %13.4s, v3.4s \n" "fmla v18.4s, %13.4s, v4.4s \n" "fmla v19.4s, %13.4s, v5.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v20.4s, %13.4s, v6.4s \n" "fmla v21.4s, %13.4s, v7.4s \n" "fmla v22.4s, %13.4s, v8.4s \n" "fmla v23.4s, %13.4s, v9.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %14.4s, v3.4s \n" "fmla v20.4s, %14.4s, v4.4s \n" "fmla v21.4s, %14.4s, v5.4s \n" "fmla v22.4s, %14.4s, v6.4s \n" "fmla v23.4s, %14.4s, v7.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r28 r29 "fmla v16.4s, %15.4s, v1.4s \n" "fmla v17.4s, %15.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %15.4s, v4.4s \n" "fmla v20.4s, %15.4s, v5.4s \n" "fmla v21.4s, %15.4s, v6.4s \n" "fmla v22.4s, %15.4s, v7.4s \n" "fmla v23.4s, %15.4s, v8.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "fmla v18.4s, %16.4s, v4.4s \n" "fmla v19.4s, %16.4s, v5.4s \n" "fmla v20.4s, %16.4s, v6.4s \n" "fmla v21.4s, %16.4s, v7.4s \n" "fmla v22.4s, %16.4s, v8.4s \n" "fmla v23.4s, %16.4s, v9.4s \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" // sum0 sum1 sum2 sum3 sum4 sum5 sum6 sum7 "faddp v16.4s, v16.4s, v17.4s \n" "faddp v18.4s, v18.4s, v19.4s 
\n" "faddp v20.4s, v20.4s, v21.4s \n" "faddp v22.4s, v22.4s, v23.4s \n" "faddp v16.4s, v16.4s, v18.4s \n" "faddp v20.4s, v20.4s, v22.4s \n" "fadd v0.4s, v0.4s, v16.4s \n" "fadd v1.4s, v1.4s, v20.4s \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } #endif // __aarch64__ for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" // r04 r05 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %8.4s, v2.4s \n" "fmul v19.4s, %8.4s, v3.4s \n" "fmla v16.4s, %9.4s, v1.4s \n" "fmla v17.4s, %9.4s, v2.4s \n" "fmla v18.4s, %9.4s, v3.4s \n" "fmla v19.4s, %9.4s, v8.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %10.4s, v8.4s \n" "fmla v19.4s, %10.4s, v9.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r14 r15 "fmla v16.4s, %11.4s, v4.4s \n" "fmla v17.4s, %11.4s, v5.4s \n" "fmla v18.4s, %11.4s, v6.4s \n" "fmla v19.4s, %11.4s, v7.4s \n" "fmla v16.4s, %12.4s, v5.4s \n" "fmla v17.4s, %12.4s, v6.4s \n" "fmla v18.4s, %12.4s, v7.4s \n" "fmla v19.4s, %12.4s, v8.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v16.4s, %13.4s, v6.4s \n" "fmla v17.4s, %13.4s, v7.4s \n" "fmla v18.4s, %13.4s, v8.4s \n" "fmla v19.4s, %13.4s, v9.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r24 
r25 "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %14.4s, v3.4s \n" "fmla v16.4s, %15.4s, v1.4s \n" "fmla v17.4s, %15.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %15.4s, v8.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "fmla v18.4s, %16.4s, v8.4s \n" "fmla v19.4s, %16.4s, v9.4s \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" // sum0 sum1 sum2 sum3 "faddp v16.4s, v16.4s, v17.4s \n" "faddp v18.4s, v18.4s, v19.4s \n" "faddp v16.4s, v16.4s, v18.4s \n" "fadd v0.4s, v0.4s, v16.4s \n" "st1 {v0.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" // r00 r01 "vmul.f32 q3, %q8, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d4-d5}, [%1 :128]! \n" // r02 "vmul.f32 q4, %q8, q1 \n" "vmla.f32 q3, %q9, q1 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" // r03 r04 "vmul.f32 q5, %q8, q2 \n" "vmla.f32 q4, %q9, q2 \n" "vmla.f32 q3, %q10, q2 \n" "vmul.f32 q6, %q8, q0 \n" "vmla.f32 q5, %q9, q0 \n" "vmla.f32 q4, %q10, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d4-d5}, [%1 :128] \n" // r05 "vmla.f32 q6, %q9, q1 \n" "vmla.f32 q5, %q10, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" // r10 r11 "vmla.f32 q6, %q10, q2 \n" "vmla.f32 q3, %q11, q0 \n" "pld [%2, #128] \n" "vld1.f32 {d4-d5}, [%2 :128]! \n" // r12 "vmla.f32 q4, %q11, q1 \n" "vmla.f32 q3, %q12, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! 
\n" // r13 r14 "vmla.f32 q5, %q11, q2 \n" "vmla.f32 q4, %q12, q2 \n" "vmla.f32 q3, %q13, q2 \n" "vmla.f32 q6, %q11, q0 \n" "vmla.f32 q5, %q12, q0 \n" "vmla.f32 q4, %q13, q0 \n" "pld [%2, #128] \n" "vld1.f32 {d4-d5}, [%2 :128] \n" // r15 "vmla.f32 q6, %q12, q1 \n" "vmla.f32 q5, %q13, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r20 r21 "vmla.f32 q6, %q13, q2 \n" "vmla.f32 q3, %q14, q0 \n" "pld [%3, #128] \n" "vld1.f32 {d4-d5}, [%3 :128]! \n" // r22 "vmla.f32 q4, %q14, q1 \n" "vmla.f32 q3, %q15, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r23 r24 "vmla.f32 q5, %q14, q2 \n" "vmla.f32 q4, %q15, q2 \n" "vmla.f32 q3, %q16, q2 \n" "vmla.f32 q6, %q14, q0 \n" "vmla.f32 q5, %q15, q0 \n" "vmla.f32 q4, %q16, q0 \n" "pld [%3, #128] \n" "vld1.f32 {d4-d5}, [%3 :128] \n" // r25 "vmla.f32 q6, %q15, q1 \n" "vmla.f32 q5, %q16, q1 \n" "vld1.f32 {d0-d1}, [%0] \n" // sum0 sum1 sum2 sum3 "vmla.f32 q6, %q16, q2 \n" "vadd.f32 d6, d6, d7 \n" "vadd.f32 d8, d8, d9 \n" "vadd.f32 d10, d10, d11 \n" "vadd.f32 d12, d12, d13 \n" "sub %1, %1, #16 \n" "vpadd.f32 d6, d6, d8 \n" "vpadd.f32 d7, d10, d12 \n" "sub %2, %2, #16 \n" "vadd.f32 q0, q0, q3 \n" "sub %3, %3, #16 \n" "vst1.f32 {d0-d1}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1] \n" // r00 r01 r02 r03 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %9.4s, v1.4s \n" "fmul v19.4s, %9.4s, v2.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2] \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %11.4s, v4.4s \n" "fmla v19.4s, %11.4s, v5.4s \n" "fmla v16.4s, %12.4s, v5.4s \n" "fmla v17.4s, %12.4s, v6.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3] \n" // r20 r21 r22 r23 "fmla v18.4s, %13.4s, v6.4s \n" "fmla v19.4s, %13.4s, v7.4s \n" "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %15.4s, v1.4s \n" "fmla v19.4s, %15.4s, v2.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "ld1 {v0.2s}, [%0] \n" // sum0 sum1 "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "add %1, %1, #32 \n" "faddp v16.4s, v16.4s, v17.4s \n" "add %2, %2, #32 \n" "faddp v16.4s, v16.4s, v16.4s \n" "add %3, %3, #32 \n" "fadd v0.2s, v0.2s, v16.2s \n" "st1 {v0.2s}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, 
[%1 :128]! \n" // r00 r01 "vmul.f32 q5, %q8, q0 \n" "vmul.f32 q6, %q8, q1 \n" "vmul.f32 q2, %q9, q1 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" // r02 r03 "vmul.f32 q3, %q9, q0 \n" "vmla.f32 q5, %q10, q0 \n" "vmla.f32 q6, %q10, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" // r10 r11 "vmla.f32 q2, %q11, q0 \n" "vmla.f32 q3, %q11, q1 \n" "vmla.f32 q5, %q12, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128] \n" // r12 r13 "vmla.f32 q6, %q12, q0 \n" "vmla.f32 q2, %q13, q0 \n" "vmla.f32 q3, %q13, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r20 r21 "vmla.f32 q5, %q14, q0 \n" "vmla.f32 q6, %q14, q1 \n" "vmla.f32 q2, %q15, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128] \n" // r22 r23 "vmla.f32 q3, %q15, q0 \n" "vmla.f32 q5, %q16, q0 \n" "vmla.f32 q6, %q16, q1 \n" "vld1.f32 {d8}, [%0] \n" // sum0 sum1 "vadd.f32 q5, q5, q2 \n" "vadd.f32 q6, q6, q3 \n" "vadd.f32 d10, d10, d11 \n" "vadd.f32 d12, d12, d13 \n" "vpadd.f32 d10, d10, d12 \n" "vadd.f32 d8, d8, d10 \n" "vst1.f32 {d8}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02 "eor v16.16b, v16.16b, v16.16b \n" "ld1 {v16.s}[0], [%0] \n" // sum0 "fmul v17.4s, %8.4s, v0.4s \n" "fmul v18.4s, %9.4s, v1.4s \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %11.4s, v3.4s \n" "fmla v18.4s, %12.4s, v4.4s \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22 "fmla v16.4s, %13.4s, v5.4s \n" "fmla v17.4s, %14.4s, v0.4s \n" "fmla v18.4s, %15.4s, v1.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fadd v17.4s, v17.4s, v18.4s \n" "fadd v16.4s, v16.4s, v17.4s \n" "add %1, %1, #16 \n" "faddp v16.4s, v16.4s, v16.4s \n" "add %2, %2, #16 \n" "faddp v16.2s, v16.2s, v16.2s \n" "add %3, %3, #16 \n" "st1 {v16.s}[0], [%0], #4 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18"); #else // __aarch64__ asm volatile( "pld [%1, #384] \n" "vldm %1, {d0-d5} \n" // r00 r01 r02 "veor q3, q3 \n" "vld1.f32 {d6[0]}, [%0] \n" // sum0 "vmul.f32 q4, %q8, q0 \n" "vmul.f32 q5, %q9, q1 \n" "vmla.f32 q3, %q10, q2 \n" "pld [%2, #384] \n" "vldm %2, {d0-d5} \n" // r10 r11 r12 "vmla.f32 q4, %q11, q0 \n" "vmla.f32 q5, %q12, q1 \n" "vmla.f32 q3, %q13, q2 \n" "pld [%3, #384] \n" "vldm %3, {d0-d5} \n" // r20 r21 
r22 "vmla.f32 q4, %q14, q0 \n" "vmla.f32 q5, %q15, q1 \n" "vmla.f32 q3, %q16, q2 \n" "vadd.f32 q4, q4, q5 \n" "vadd.f32 q3, q3, q4 \n" "add %1, %1, #16 \n" "vadd.f32 d6, d6, d7 \n" "add %2, %2, #16 \n" "vpadd.f32 d6, d6, d6 \n" "add %3, %3, #16 \n" "vst1.f32 {d6[0]}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5"); #endif // __aarch64__ } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } k0 += 9 * 4; } } }
leader_elect_hs.c
#include <stdio.h> #include <omp.h> #include <stdlib.h> #include <string.h> #include "ompdist/election.h" #include "ompdist/vector.h" #include "ompdist/utils.h" #include "ompdist/queues.h" #include "ompdist/msr.h" #include "config.h" typedef struct { int starter_label; int hops_left; int direction; int direction_changed; int stop_initiating; } message; /** * generate_send_messages - Creates a new phase's initial messages. * * @processes: the processes themselves * @l: current phase number * @N: total number of processes * @send_ql: a queuelist object that's going to store this round's messages */ void generate_send_messages(process* processes, int l, int N, queuelist* send_ql) { DEBUG("Generating 2*%d messages for %d processes\n", N, N); #pragma omp parallel for schedule(SCHEDULING_METHOD) for (int i = 0; i < N; i++) { process* p = processes+i; /** * If this node has been asked (or decided) to not send out any more * original messages (all nodes will always be up for propagating; but * creating new messages is a privilege that nodes might lose), don't * do anything. */ if (p->status == -1) continue; message to_right = {i, 1 << l, 1, 0, 0}; message to_left = {i, 1 << l, -1, 0, 0}; enqueue(send_ql, i, &to_right); enqueue(send_ql, i, &to_left); } } /** * propagate_messages - Propagates incomplete messages through the ring. 
* * @processes: the processes themselves * @l: current phase number * @N: total number of processes * @send_ql: a queuelist object that's going to store this round's messages * @recv_ql: a temporary queuelist object that will collect and transmit */ void propagate_messages(process* processes, int l, int N, queuelist* send_ql, queuelist* recv_ql) { DEBUG("propagating messages on phase %d\n", l); #pragma omp parallel for schedule(SCHEDULING_METHOD) for (int i = 0; i < N; i++) { DEBUG("i = %d\n", i); process* p = processes+i; while (!is_ql_queue_empty(send_ql, i)) { message* m = dequeue(send_ql, i); DEBUG("m->starter_label = %d\n", m->starter_label); /** * If the starter_label is the current node, then ther are two * possibilities: * - this node has returned back home; increment status and be * done with that message * - this node never turned direction; this node is the winner * so make this the leader * * Otherwise the message reached the far end. It's time to change * direction, refresh the number of hops_left and go back. */ if (m->starter_label == i && m->hops_left != (1 << l)) { if (m->stop_initiating) p->status = -1; else { if (m->direction_changed) p->status++; else { p->status = 3; break; } } continue; } if (m->hops_left == 0) { DEBUG("zero hops left\n"); m->hops_left = 1 << l; m->direction *= -1; m->direction_changed = 1; } /** * Make sure this message is good enough to propagate. A message * passes through a node only if the origin is not lesser than * the current node's label. A message that passes through a node * in one direction _will_ pass through the same node when it's * coming back. * * When a node passes a message along, it can no longer win. * Therefore, it'll mark itself as status = -1, meaning that * it'll no longer start messages. * * If a message is not passed through (m->starter_label < i) then * the origin must be asked to not pass messages anymore. 
*/ if (m->starter_label < i) { /** * Of the (1 << l) hops the message intended to complete, it * has `hops_left` left, implying that it took * `(1 << l) - hops_left` hops to get here. It'll take exactly * the same number to go back to its origin. */ m->hops_left = (1 << l) - m->hops_left; m->direction *= -1; m->direction_changed = 1; m->stop_initiating = 1; continue; } else { m->hops_left--; p->status = -1; } int next_label = (N + i + m->direction) % N; enqueue(recv_ql, next_label, m); } } /** * At this point, every queue in `send_ql` must be empty (since that's the * only way each process breaks out of its while loop. The `dequeue` * operation will automatically set the number of elements to zero after * the last element. */ #pragma omp parallel for schedule(SCHEDULING_METHOD) for (int i = 0; i < N; i++) { process* p = processes+i; while (!is_ql_queue_empty(recv_ql, i)) { enqueue(send_ql, i, dequeue(recv_ql, i)); } } /** * At this point, every queue in `recv_ql` must be empty just like the last * time when `send_ql` was empty. */ } /** * check_status - Iterates through every process and checks if there are any * more phases to go through. * * @processes: The processes themselves. * @N: The number of processes * @send_ql: A pointer to the to-send queuelist * * Returns: * 2: This phase is done, but no leader has been found yet; phase++ * 1: The phase isn't over; some messages haven't propagated completely yet * <=0: Negative of this number is the leader; all phases are complete */ int check_statuses(process* processes, int N, queuelist* send_ql) { for (int i = 0; i < N; i++) { process* p = processes+i; /** * If p->status is 3, this node has decided to be the leader. */ if (p->status == 3) return -i; } for (int i = 0; i < N; i++) { if (!is_ql_queue_empty(send_ql, i)) return 1; } return 2; } /** * debug_display_queuelist - Prints debug information to stdout. Disabled if * `LOG_LEVEL` is less than 3. 
* * @ql: A pointer to a queuelist object */ void debug_display_queuelist(queuelist* ql) { DEBUG("displaying the queuelist\n"); for (int i = 0; i < ql->N; i++) { vector* v = ql->queues[i]; for (int j = ql->front[i]; j < v->used; j++) { message* m = elem_at(v, j); DEBUG("%d: {%d %d %2d %d %d}\n", i, m->starter_label, m->hops_left, m->direction, m->direction_changed, m->stop_initiating); } } } /** * Hirshberg-Sinclair algorithm for leader election. Source: Distributed * Algorithms (lecture notes for MIT's 6.852, fall 1992) by Nancy A. Lynch * and Boaz Patt-Sinclair - Section 2.1.1 Algorithm 2 */ int main(int argc, char* argv[]) { int N; process* processes; int iterate; int iterations = 1; if ((iterate = input_through_argv(argc, argv))) { FILE* in = fopen(argv[2], "r"); fscanf(in, "%d", &N); processes = generate_nodes(N); for (int i = 0; i < N; i++) { int x; fscanf(in, "%d", &x); processes[i].id = processes[i].leader = processes[i].send = x; } sscanf(argv[3], "%d", &iterations); } else { N = 16; if (argc > 1) sscanf(argv[1], "%d", &N); processes = generate_nodes(N); } long long duration = 0; double total_energy = 0; int verification; for (int i = 0; i < iterations; i++) { process* ps = generate_nodes(N); memcpy(ps, processes, sizeof(process)*N); /** * We need two different queue lists for the following reason. Say there * are two nodes A and B. For the sake of argument, say B has no messages * to pass on. B will immediately exit its loop. Say A wants to enqueue * a message into B's queue. Since B is already done, it'll never see this * message. To avoid this, we have two queues - messages will be picked * up from the send_ql, processed and added to the destination's recv_ql. * Then, after all threads complete, messages are copied from recv_ql * and simply copied to send_ql. 
*/ queuelist* recv_ql = new_queuelist(N, sizeof(message)); queuelist* send_ql = new_queuelist(N, sizeof(message)); begin_timer(); init_energy_measure(); int chosen_id = -1; int l = 0; int finished = 0; while (!finished) { l += 1; DEBUG("starting phase %d\n", l); generate_send_messages(ps, l, N, send_ql); while (1) { propagate_messages(ps, l, N, send_ql, recv_ql); int status = check_statuses(ps, N, send_ql); DEBUG("status = %d\n", status); /** * Not all messages have propagated fully. Keep going. */ if (status == 1) continue; /** * All messages were processed, but there's no clear leader. Go * for another phase. */ if (status == 2) break; /** * A leader has been chosen! Make sure everybody knows this * and exit. */ if (status <= 0) { chosen_id = -status; set_leader(ps, N, chosen_id); finished = 1; break; } } } total_energy += total_energy_used(); duration += time_elapsed(); INFO("chosen leader: %d\n", chosen_id); INFO("number of phases: %d\n", l); free_queuelist(send_ql); free_queuelist(recv_ql); free(ps); } if (iterate) printf("%.2lf %.2lf\n", ((double) duration) / iterations, total_energy / iterations); return 0; }
SparseGaussianProcess.h
/*
 * Copyright 2015 Christoph Jud (christoph.jud@unibas.ch)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
#pragma once

#include <limits>

#include "Kernel.h"
#include "GaussianProcess.h"

namespace gpr{

// Forward declaration: SparseLikelihood is a friend of this class; its
// header is included at the very end of this file.
template <class TScalarType> class SparseLikelihood;

/*
 * Sparse Gaussian process regression: in addition to the dense sample/label
 * pairs held by the GaussianProcess base class, a (smaller) set of inducing
 * points is maintained, and regression is performed through the inducing
 * kernel matrix (see PreComputeRegression for the actual math).
 */
template< class TScalarType >
class SparseGaussianProcess : public GaussianProcess<TScalarType>{
public:
    typedef SparseGaussianProcess Self;
    typedef std::shared_ptr<Self> Pointer;

    typedef GaussianProcess<TScalarType> Superclass;
    typedef typename Superclass::VectorType VectorType;
    typedef typename Superclass::MatrixType MatrixType;
    typedef typename Superclass::DiagMatrixType DiagMatrixType;
    typedef typename Superclass::VectorListType VectorListType;
    typedef typename Superclass::MatrixListType MatrixListType;
    typedef typename Superclass::KernelType KernelType;
    typedef typename Superclass::KernelTypePointer KernelTypePointer;

    // Constructors (jitter defaults to zero; see SetJitter)
    SparseGaussianProcess(KernelTypePointer kernel) : Superclass(kernel),
        m_Jitter(0),
        m_Initialized(false){}
    SparseGaussianProcess(KernelTypePointer kernel, TScalarType jitter) : Superclass(kernel),
        m_Jitter(jitter),
        m_Initialized(false){}

    // Destructor
    virtual ~SparseGaussianProcess(){}

    /*
     * Add a new inducing sample label pair to the sparse Gaussian process
     * x is the input vector
     * y the corresponding label vector
     *
     * Invalidates any previously computed regression state (m_Initialized).
     */
    void AddInducingSample(const VectorType& x, const VectorType& y){
        if(m_InducingSampleVectors.size() == 0){
            // first call of AddSample defines dimensionality of input space
            this->m_InputDimension = x.size();
        }
        if(m_InducingLabelVectors.size() == 0){
            // first call of AddSample defines dimensionality of output space
            this->m_OutputDimension = y.size();
        }

        this->CheckInputDimension(x, "SparseGaussianProcess::AddInducingSample: ");
        this->CheckOutputDimension(y, "SparseGaussianProcess::AddInducingSample: ");

        m_InducingSampleVectors.push_back(x);
        m_InducingLabelVectors.push_back(y);
        m_Initialized = false;
    }

    /*
     * Removes all inducing sample label pairs from the sparse Gaussian process
     */
    void ClearInducingSamples(){
        m_InducingSampleVectors.clear();
        m_InducingLabelVectors.clear();
        m_Initialized = false;
    }

    /*
     * Predict the label for input x: k(x, inducing)^T * regression vectors.
     * Triggers (lazy) initialization of the regression state.
     */
    VectorType Predict(const VectorType &x){
        Initialize();
        this->CheckInputDimension(x, "GaussianProcess::Predict: ");
        VectorType Kx;
        ComputeKernelVector(x, Kx);
        return (Kx.adjoint() * m_RegressionVectors).adjoint();
    }

    /*
     * Predictive (co)variance between inputs x and y:
     *   k(x,y) - Kx^T inv(Kmm) Ky + Kx^T R Ky
     * where R is the precomputed regression matrix.
     */
    TScalarType operator()(const VectorType & x, const VectorType & y){
        Initialize();
        this->CheckInputDimension(x, "SparseGaussianProcess::(): ");
        this->CheckInputDimension(y, "SparseGaussianProcess::(): ");
        VectorType Kx;
        ComputeKernelVector(x, Kx);
        VectorType Ky;
        ComputeKernelVector(y, Ky);
        return (*this->m_Kernel)(x, y) - Kx.adjoint() * m_IndusingInvertedKernelMatrix * Ky + Kx.adjoint() * m_RegressionMatrix * Ky;
    }

    unsigned GetNumberOfInducingSamples() const{
        return m_InducingSampleVectors.size();
    }

    TScalarType GetJitter() const{
        return m_Jitter;
    }

    // Jitter is added to the diagonal of the inducing kernel matrix
    // (numerical regularization); changing it invalidates the cached state.
    void SetJitter(TScalarType jitter){
        m_Jitter = jitter;
        m_Initialized = false;
    }

    /*
     * Lazily (re)computes the regression state. Throws std::string if either
     * the inducing set or the dense sample/label set is empty.
     */
    virtual void Initialize(){
        if(m_Initialized){
            return;
        }
        if(!(m_InducingSampleVectors.size() > 0)){
            throw std::string("SparseGaussianProcess::Initialize: no inducing samples defined during initialization");
        }
        if(!(m_InducingLabelVectors.size() > 0)){
            throw std::string("SparseGaussianProcess::Initialize: no inducing labels defined during initialization");
        }
        if(!(this->m_SampleVectors.size() > 0)){
            throw std::string("SparseGaussianProcess::Initialize: no dense samples defined during initialization");
        }
        if(!(this->m_LabelVectors.size() > 0)){
            throw std::string("SparseGaussianProcess::Initialize: no dense labels defined during initialization");
        }

        PreComputeRegression();
        m_Initialized = true;
    }

    // this method is public for using it in testing
    // Kernel matrix over the DENSE samples (not the inducing ones).
    virtual void ComputeDenseKernelMatrix(MatrixType &M) const{
        if(this->debug){
            std::cout << "SparseGaussianProcess::ComputeDenseKernelMatrix: building kernel matrix... ";
            std::cout.flush();
        }
        Superclass::ComputeKernelMatrixInternal(M, this->m_SampleVectors);
        if(this->debug) std::cout << "[done]" << std::endl;
    }

protected:
    /*
     * Computation of inducing kernel matrix K_ij = k(x_i, x_j)
     * - it is symmetric therefore only half of the kernel evaluations
     *   has to be performed
     *
     * (The actual computation is performed in ComputeKernelMatrixInternal)
     */
    virtual void ComputeKernelMatrix(MatrixType &M) const{
        if(this->debug){
            std::cout << "SparseGaussianProcess::ComputeKernelMatrix: building kernel matrix... ";
            std::cout.flush();
        }
        Superclass::ComputeKernelMatrixInternal(M, m_InducingSampleVectors);
        if(this->debug) std::cout << "[done]" << std::endl;
    }

    // Inducing kernel matrix plus m_Jitter on the diagonal.
    virtual void ComputeKernelMatrixWithJitter(MatrixType &M) const{
        ComputeKernelMatrix(M);

        // add jitter to diagonal
        for(unsigned i=0; i<M.rows(); i++){
            M(i,i) += m_Jitter;
        }
    }

    /*
     * Bring the label vectors in a matrix form Y,
     * where the rows are the labels.
     *
     * (it is actually performed in ComputeLabelMatrixInternal)
     */
    virtual void ComputeLabelMatrix(MatrixType &Y) const{
        Superclass::ComputeLabelMatrixInternal(Y, m_InducingLabelVectors);
    }

    /*
     * Bring the dense label vectors in a matrix form Y
     *
     * (calls the superclass method)
     */
    virtual void ComputeDenseLabelMatrix(MatrixType &Y) const{
        Superclass::ComputeLabelMatrix(Y);
    }

    /*
     * Computation of the kernel vector V_i = k(x, x_i)
     * against the INDUCING samples.
     *
     * (calls ComputeKernelVectorInternal)
     */
    virtual void ComputeKernelVector(const VectorType &x, VectorType &Kx) const{
        Superclass::ComputeKernelVectorInternal(x, Kx, m_InducingSampleVectors);
    }

    /*
     * Computation of the cross-covariance matrix Kmn = k(x_i, y_j)
     * where x is in the inducing samples and y in the dense samples
     *
     * - Kmn = [Kx1 Kx2 ... Kxm] in R^nxm
     *
     * (calls ComputeKernelVectorInternal)
     */
    virtual void ComputeKernelVectorMatrix(MatrixType &Knm) const{
        unsigned n = this->m_SampleVectors.size();
        unsigned m = m_InducingSampleVectors.size();
        if(!(m<=n)){
            throw std::string("SparseGaussianProcess::ComputeKernelVectorMatrix: number of dense samples must be higher than the number of sparse samples");
        }

        Knm.resize(n, m);

#pragma omp parallel for
        for(unsigned i=0; i<n; i++){
            for(unsigned j=0; j<m; j++){
                Knm(i, j) = (*this->m_Kernel)(this->m_SampleVectors[i], m_InducingSampleVectors[j]);
            }
        }
    }

    /*
     * Derivatives of the cross kernel matrix with respect to the kernel
     * parameters, stacked vertically: row block p (size n x m) holds
     * d Knm / d param_p.
     */
    virtual void ComputeDerivativeKernelVectorMatrix(MatrixType &M)const{
        unsigned num_params = this->m_Kernel->GetNumberOfParameters();
        unsigned n = this->m_SampleVectors.size();
        unsigned m = m_InducingSampleVectors.size();
        if(!(m<=n)){
            throw std::string("SparseGaussianProcess::ComputeDerivativeKernelVectorMatrix: number of dense samples must be higher than the number of sparse samples");
        }

        M.resize(n*num_params,m);

#pragma omp parallel for
        for(unsigned i=0; i<n; i++){
            for(unsigned j=0; j<m; j++){
                typename GaussianProcess<TScalarType>::VectorType v;
                v = this->m_Kernel->GetDerivative(this->m_SampleVectors[i], m_InducingSampleVectors[j]);
                if(v.rows() != num_params) throw std::string("SparseGaussianProcess::ComputeDerivativeKernelMatrixInternal: dimension missmatch in derivative.");
                for(unsigned p=0; p<num_params; p++){
                    //if(i+p*n >= M.rows() || j+p*n >= M.rows()) throw std::string("GaussianProcess::ComputeDerivativeKernelMatrix: dimension missmatch in derivative.");
                    M(i + p*n, j) = v[p];
                    //M(j + p*n, i) = v[p];
                }
            }
        }
    }

    /*
     * Learning is performed.
     *
     * Mean:
     *   Kxm * inv(Kmm) * mu, mu = sigma^2 Kmm * Sigma * Kmn * Y
     *
     * Precomputes: inv(Kmm), the Sigma matrix, the regression vectors (for
     * the predictive mean), the regression matrix (for the predictive
     * variance) and the core matrix (used by the likelihood classes).
     */
    virtual void PreComputeRegression(){
        // Computation of kernel matrix
        if(this->debug){
            std::cout << "SparseGaussianProcess::PreComputeRegression: calculating regression vectors and regression matrix... " << std::endl;
        }

        // NOTE(review): "stable" inversion is selected only when the jitter is
        // below the smallest positive TScalarType — i.e. effectively zero.
        bool stable = (m_Jitter<std::numeric_limits<TScalarType>::min())? true : false;

        MatrixType K;
        ComputeKernelMatrixWithJitter(K);

        // inverting inducing kernel matrix
        m_IndusingInvertedKernelMatrix = this->InvertKernelMatrix(K, this->m_InvMethod, stable);

        // computing kernel vector matrix between inducing points and dense points
        MatrixType Kmn;
        ComputeKernelVectorMatrix(Kmn);

        // Computing label matrix
        // calculate label matrix
        // TODO: if a mean support is implemented, the mean has to be subtracted from the labels!
        MatrixType Y;
        ComputeDenseLabelMatrix(Y);

        // computation of Sigma matrix
        TScalarType inverse_sigma2 = 1.0/(this->m_Sigma*this->m_Sigma);
        MatrixType S = K + inverse_sigma2*Kmn.adjoint()*Kmn;
        m_SigmaMatrix = this->InvertKernelMatrix(S, this->m_InvMethod, stable);

        // regression vectors for computing mean
        m_RegressionVectors = m_IndusingInvertedKernelMatrix * (inverse_sigma2*K*m_SigmaMatrix*Kmn.adjoint()*Y);

        // regression matrix for computing variance
        m_RegressionMatrix = m_IndusingInvertedKernelMatrix * (K*m_SigmaMatrix*K) * m_IndusingInvertedKernelMatrix;

        // core matrix, used for likelihoods
        MatrixType C;
        ComputeCoreMatrix(C, m_IndusingInvertedKernelMatrix, Kmn);
        m_CoreMatrix = C;
    }

    /*
     * Computation of the following:
     *  - Inducing kernel matrix K
     *  - Inducing inverted kernel matrix inv(K)
     *  - Cross kernel matrix Kmn
     *  - Identity (noise) matrix I_sigma
     */
    virtual void ComputeCoreMatrices(MatrixType &K, MatrixType &K_inv, MatrixType &Kmn, DiagMatrixType &I_sigma){
        bool stable = (m_Jitter<std::numeric_limits<TScalarType>::min())? true : false;

        if(this->debug) std::cout << "SparseGaussianProcess::ComputeCoreMatrices: compute kernel matrix..." << std::flush;
        ComputeKernelMatrixWithJitter(K);
        if(this->debug) std::cout << " [done]" << std::endl;

        if(this->debug) std::cout << "SparseGaussianProcess::ComputeCoreMatrices: invert kernel matrix..." << std::flush;
        K_inv = this->InvertKernelMatrix(K, this->m_InvMethod, stable);
        if(this->debug) std::cout << " [done]" << std::endl;

        if(this->debug) std::cout << "SparseGaussianProcess::ComputeCoreMatrices: compute link kernel matrix..." << std::flush;
        ComputeKernelVectorMatrix(Kmn);
        if(this->debug) std::cout << " [done]" << std::endl;

        if(this->GetSigma()<=0){
            throw std::string("SparseGaussianProcess::ComputeCoreMatrices: sigma must be positive.");
        }
        if(Kmn.rows() == 0){
            throw std::string("SparseGaussianProcess::ComputeCoreMatrices: empty sample set.");
        }

        if(this->debug) std::cout << "SparseGaussianProcess::ComputeCoreMatrices: compute additional noise..." << std::flush;
        I_sigma.resize(Kmn.rows());
        I_sigma.setIdentity();
        I_sigma = (I_sigma.diagonal().array() * this->GetSigmaSquared()).matrix().asDiagonal();
        if(this->debug) std::cout << " [done]" << std::endl;
    }

    /*
     * Computation of core matrix: Kmn * inv(Kmm) * Knm
     * (also returns inv(Kmm) through K_inv)
     */
    virtual void ComputeCoreMatrix(MatrixType &C, MatrixType &K_inv) const{
        bool stable = (m_Jitter<std::numeric_limits<TScalarType>::min())? true : false;
        MatrixType K;
        ComputeKernelMatrixWithJitter(K);

        MatrixType Knm;
        ComputeKernelVectorMatrix(Knm);

        std::cout << "C " << Knm.rows() << " x " << Knm.cols() << std::endl;

        K_inv = this->InvertKernelMatrix(K, this->m_InvMethod, stable);
        ComputeCoreMatrix(C, K_inv, Knm);
    }

    // additional interfaces for ComputeCoreMatrix
    virtual void ComputeCoreMatrix(MatrixType &C) const{
        MatrixType K_inv;
        ComputeCoreMatrix(C, K_inv);
    }

    virtual void ComputeCoreMatrix(MatrixType &C, const MatrixType& K_inv, const MatrixType& Knm) const{
        C = Knm * K_inv * Knm.adjoint();
    }

    /*
     * Computation of the derivative inducing kernel matrix D_i = delta Kmm / delta params_i
     * - returns a matrix: [D_0
     *                       .
     *                      D_i
     *                       .
     *                      D_l-1]
     *   for l = number of params and D_i in mxm, m = number of inducing samples
     */
    virtual void ComputeDerivativeKernelMatrix(MatrixType &M) const{
        if(this->debug){
            std::cout << "SparseGaussianProcess::ComputeDerivativeKernelMatrix: building kernel matrix... ";
            std::cout.flush();
        }
        Superclass::ComputeDerivativeKernelMatrixInternal(M, m_InducingSampleVectors);
        if(this->debug) std::cout << "[done]" << std::endl;
    }

    TScalarType m_Jitter; // noise on inducing kernel matrix
    bool m_Initialized;   // true once PreComputeRegression has run for the current state

    VectorListType m_InducingSampleVectors;  // Dimensionality: TInputDimension
    VectorListType m_InducingLabelVectors;   // Dimensionality: TOutputDimension

    MatrixType m_RegressionVectors;          // mu of m(x)
    MatrixType m_SigmaMatrix;                // inv(K + 1/sigma^2 * Kmn^T Kmn)
    // NOTE(review): "Indusing" is a long-standing typo for "Inducing"; the
    // name is kept because the friend class may depend on it.
    MatrixType m_IndusingInvertedKernelMatrix;
    MatrixType m_RegressionMatrix;
    MatrixType m_CoreMatrix;                 // Knm * inv(Kmm) * Kmn

private:
    SparseGaussianProcess(const Self &); //purposely not implemented
    void operator=(const Self &); //purposely not implemented

    friend class SparseLikelihood<TScalarType>;
};

} // namespace gpr

#include "SparseLikelihood.h"
alignblt.c
/********************************************************************[libaroma]* * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *______________________________________________________________________________ * * Filename : alignblt.c * Description : align blit * * + This is part of libaroma, an embedded ui toolkit. * + 06/04/15 - Author(s): Ahmad Amarullah * */ #ifndef __libaroma_alignblt_c__ #define __libaroma_alignblt_c__ #include <aroma_internal.h> void libaroma_blt_align16(wordp __restrict dst, wordp __restrict src, int w, int h, int dst_stride, int src_stride) { int i; int w2 = w<<1; int ds = w2 + dst_stride; int ss = w2 + src_stride; bytep d = (bytep) dst; bytep s = (bytep) src; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i = 0; i < h; i++) { memcpy( d+ds*i, s+ss*i, w2 ); } } void libaroma_blt_align32_to16(wordp __restrict dst, dwordp __restrict src, int w, int h, int dst_stride, int src_stride) { int i; int dline = w+(dst_stride>>1); int sline = w+(src_stride>>2); #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i = 0; i < h; i++) { libaroma_dither_line( i, w, dst+dline*i, src+sline*i ); } } void libaroma_blt_align16_to32(dwordp __restrict dst, wordp __restrict src, int w, int h, int dst_stride, int src_stride) { int i; int dline = w+(dst_stride>>2); int sline = w+(src_stride>>1); #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i = 0; i < h; i++) { 
libaroma_btl32( w,dst+dline*i,src+sline*i ); } } void libaroma_blt_align32(dwordp __restrict dst, dwordp __restrict src, int w, int h, int dst_stride, int src_stride) { int i; int w4 = w<<2; int ds = w4 + dst_stride; int ss = w4 + src_stride; bytep d = (bytep) dst; bytep s = (bytep) src; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i = 0; i < h; i++) { memcpy( d+ds*i, s+ss*i, w4 ); } } void libaroma_blt_align_to32_pos(dwordp __restrict dst, wordp __restrict src, int w, int h, int dst_stride, int src_stride, bytep rgb_pos) { int i; int dline = w+(dst_stride>>2); int sline = w+(src_stride>>1); #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i = 0; i < h; i++) { libaroma_color_copy32( dst+dline*i, src+sline*i, w, rgb_pos ); } } void libaroma_blt_align_to16_pos(wordp __restrict dst, dwordp __restrict src, int w, int h, int dst_stride, int src_stride, bytep __restrict rgb_pos) { int i; int dline = w+(dst_stride>>1); int sline = w+(src_stride>>2); #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i = 0; i < h; i++) { libaroma_color_copy16( dst+dline*i, src+sline*i, w, rgb_pos ); } } #endif /* __libaroma_alignblt_c__ */
focal.c
/** * Author: Alf Köhn-Seemann * Email: koehn@igvp.uni-stuttgart.de * Copyright: University of Stuttgart * * This is a 3D FDTD code for simulating electromagnetic waves in cold * magnetized plasmas. * * NOTE: This is an early version, including some obsolete function, those * will be removed in near future. * Furthermore, everything will be properly split into separate * libraries, allowing the usage of a nice make file. * * Initial release on github: 2022-03-31 * **/ #include <limits.h> #include <math.h> #include <stdio.h> #include <string.h> #include <strings.h> #include <stdlib.h> #include <stdarg.h> #include <getopt.h> #include <sys/stat.h> #include <stdbool.h> // check if compiler understands OMP, if not, this file does probably not exist #ifdef _OPENMP #include <omp.h> #endif #define HDF5 #ifdef HDF5 #include "hdf5.h" #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #define ABSORBER_DAMPING(eco,damp) (1.-eco*damp*damp) // setting boundary conditions, possible choices are // 1: simple_abc // 2: Mur #define BOUNDARY 1 #define DETECTOR_ANTENNA_1D // prototyping int make_antenna_profile_4( int exc_signal, double antAngle_zy, double antAngle_zx, double ant_w0x, double ant_w0y, double z2waist, int ant_x, int ant_y, int ant_z, size_t N_x, size_t N_y, double period, double antField_xy[N_x/2][N_y/2], double antPhaseTerms[N_x/2][N_y/2] ); int make_density_profile( int ne_profile, double period, int d_absorb, double cntrl_para, size_t N_x, size_t N_y, size_t N_z, double n_e[N_x/2][N_y/2][N_z/2] ); int set_densityInAbsorber_v2( double period, int d_absorb, char absorber[], size_t N_x, size_t N_y, size_t N_z, double n_e[N_x/2][N_y/2][N_z/2] ); int make_B0_profile( int B0_profile, double period, int d_absorb, double cntrl_para, size_t N_x, size_t N_y, size_t N_z, double J_B0[N_x][N_y][N_z] ); int add_source( int exc_signal, size_t N_x, size_t N_y, size_t N_z, double period, int ant_z, int t_int, double omega_t, double antField_xy[N_x/2][N_y/2], double 
antPhaseTerms[N_x/2][N_y/2], double EB_WAVE[N_x][N_y][N_z] ); int apply_absorber( size_t N_x, size_t N_y, size_t N_z, int d_absorb, double eco, double EB_WAVE[N_x][N_y][N_z] ); int apply_absorber_v2( size_t N_x, size_t N_y, size_t N_z, int d_absorb, double eco, char absorber[], double EB_WAVE[N_x][N_y][N_z] ); int abc_Mur_saveOldE_xdir( size_t N_x, size_t N_y, size_t N_z, double EB_WAVE[N_x][N_y][N_z], double E_old[8][N_y][N_z] ); int abc_Mur_saveOldE_ydir( size_t N_x, size_t N_y, size_t N_z, double EB_WAVE[N_x][N_y][N_z], double E_old[N_y][8][N_z] ); int abc_Mur_saveOldE_zdir( size_t N_x, size_t N_y, size_t N_z, double EB_WAVE[N_x][N_y][N_z], double E_old[N_x][N_y][8] ); int abs_Mur_1st( size_t N_x, size_t N_y, size_t N_z, double dt, double dx, double EB_WAVE[N_x][N_y][N_z], double E_old_xdir[8][N_y][N_z], double E_old_ydir[N_x][8][N_z], double E_old_zdir[N_x][N_y][8] ); int abs_Mur_1st_v2( size_t N_x, size_t N_y, size_t N_z, double dt, double dx, char absorber[], double EB_WAVE[N_x][N_y][N_z], double E_old_xdir[8][N_y][N_z], double E_old_ydir[N_x][8][N_z], double E_old_zdir[N_x][N_y][8] ); int advance_J( size_t dim1, size_t dim2, size_t dim3, double EB_WAVE[dim1][dim2][dim3], double J_B0[dim1][dim2][dim3], double n_e[dim1/2][dim2/2][dim3/2], double dt ); int advance_B( size_t dim1, size_t dim2, size_t dim3, double EB_WAVE[dim1][dim2][dim3], double dx, double dt ); int advance_E( size_t dim1, size_t dim2, size_t dim3, double EB_WAVE[dim1][dim2][dim3], double J_B0[dim1][dim2][dim3], double dx, double dt ); int advance_E_vacuum( size_t dim1, size_t dim2, size_t dim3, double EB_WAVE[dim1][dim2][dim3], double dx, double dt ); int calc_poynt_1( size_t N_x, size_t N_y, size_t N_z, int pwr_dect, char absorber[], double poynt[3], double EB_WAVE[N_x][N_y][N_z] ); double calc_poynt_2( size_t N_x, size_t N_y, size_t N_z, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z] ); double calc_poynt_3( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char 
absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ); double calc_poynt_4( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ); double calc_poynt_5( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ); double calc_poynt_6( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ); double calc_poynt_7( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ); double calc_power_EE_1( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ); int set2zero_1D( size_t N_x, double arr_1D[N_x] ); int set2zero_3D( size_t N_x, size_t N_y, size_t N_z, double arr_3D[N_x][N_y][N_z] ); int writeTimetraces2ascii( int dim0, int dim1, int t_end, double period, char filename[], double timetraces[dim0][dim1] ); #ifdef DETECTOR_ANTENNA_1D int detAnt1D_storeValues( size_t N_x, size_t N_y, size_t N_z, size_t detAnt_ypos, size_t detAnt_zpos, int tt, double period, double EB_WAVE[N_x][N_y][N_z], double detAnt_fields[N_x/2][5] ); #endif #if defined(HDF5) && defined(DETECTOR_ANTENNA_1D) int detAnt1D_write2hdf5( int N_x, char filename[], char detAnt_groupName[], size_t detAnt_ypos, size_t detAnt_zpos, double detAnt_fields[N_x/2][5] ); #endif #ifdef HDF5 int writeMyHDF_v4( int dim0, int dim1, int dim2, char filename[], char dataset[], double array_3D[dim0][dim1][dim2] ); int writeConfig2HDF( char filename[], int N_x, int N_y, int N_z, int period, int d_absorb ); int readMyHDF( int dim0, int dim1, int dim2, char filename[], char dataset[], double array_3D[dim0][dim1][dim2]); 
#endif int main( int argc, char *argv[] ) { //{{{ int ii,jj,kk, t_int, t_end, T_wave, ant_x, ant_y, ant_z, scale, NX, NY, NZ, NZ_ref, #ifdef _OPENMP n_threads, // number of threads that will be used (OpenMP) #endif d_absorb, pwr_dect, #ifdef DETECTOR_ANTENNA_1D detAnt_01_zpos, detAnt_02_zpos, detAnt_03_zpos, detAnt_04_zpos, detAnt_05_zpos, detAnt_06_zpos, detAnt_07_zpos, detAnt_01_ypos, #endif len_str, // return value of snprintf opt_ret; // return value of getopt (reading input parameter) double period, dx,dt, #if BOUNDARY == 1 eco, #endif //ant_phase, GouyPhase_beam, antAngle_zy, antAngle_zx, ant_w0x, ant_w0y, poynt_x1, poynt_x2, poynt_y1, poynt_y2, poynt_z1, poynt_z2, poynt_z1_ref, power_abs_x1, power_abs_x2, power_abs_y1, power_abs_y2, power_abs_z1, power_abs_z2, power_abs_ref, /* power_EE_x1, power_EE_x2, power_EE_y1, power_EE_y2, power_EE_z1, power_EE_z2, power_EE_ref, */ aux, omega_t; char dSet_name[PATH_MAX], filename_hdf5[PATH_MAX]; // filename of hdf5 file for output bool angle_zx_set, // is antAngle_zx set during call ? angle_zy_set; // is antAngle_zy set during call ? 
// set-up grid scale = 1; period = 16*scale; #if BOUNDARY == 1 d_absorb = (int)(3*period); #elif BOUNDARY == 2 d_absorb = 8; #endif NX = (1280)*scale; NY = (400)*scale; NZ = (480)*scale; NZ_ref = 2*d_absorb + (int)period; t_end = (int)((150+0*250)*period); // arrays realized as variable-length array (VLA) // E- and B-wavefield double (*EB_WAVE)[NY][NZ] = calloc(NX, sizeof *EB_WAVE); double (*EB_WAVE_ref)[NY][NZ_ref] = calloc(NX, sizeof *EB_WAVE_ref); // J-wavefield (in plasma) and background magnetic field double (*J_B0)[NY][NZ] = calloc(NX, sizeof *J_B0); // background electron plasma density double (*n_e)[NY/2][NZ/2] = calloc(NX/2, sizeof *n_e); // used when writing data into hdf5-files double (*data2save)[NY/2][NZ/2] = calloc(NX/2, sizeof *data2save); // antenna: envelope of injected field double (*antField_xy)[NY/2] = calloc(NX/2, sizeof *antField_xy); // antenna: phase terms double (*antPhaseTerms)[NY/2] = calloc(NX/2, sizeof *antPhaseTerms); // time traces double (*timetraces)[8] = calloc((t_end/(int)period), sizeof *timetraces); // old E-fields required for Mur's boundary condition #if BOUNDARY == 2 double (*E_Xdir_OLD)[NY][NZ] = calloc(8, sizeof *E_Xdir_OLD); double (*E_Ydir_OLD)[8][NZ] = calloc(NX, sizeof *E_Ydir_OLD); double (*E_Zdir_OLD)[NY][8] = calloc(NX, sizeof *E_Zdir_OLD); double (*E_Xdir_OLD_ref)[NY][NZ_ref]= calloc(8, sizeof *E_Xdir_OLD_ref); double (*E_Ydir_OLD_ref)[8][NZ_ref] = calloc(NX, sizeof *E_Ydir_OLD_ref); double (*E_Zdir_OLD_ref)[NY][8] = calloc(NX, sizeof *E_Zdir_OLD_ref); #endif // array for detector antennas // sum_t(Ex*Ex) | sum_t(Ey*Ey) | sum_t(Ez*Ez) | sum_t(E*E) | rms(E) #ifdef DETECTOR_ANTENNA_1D // TODO: change into 3D array, such that each detector antenna corresponds // to one 2D array; that way it can be written much more failsafe... 
// requires some changes in procedures for storing and saving double (*detAnt_01_fields)[5] = calloc(NX, sizeof *detAnt_01_fields); double (*detAnt_02_fields)[5] = calloc(NX, sizeof *detAnt_02_fields); double (*detAnt_03_fields)[5] = calloc(NX, sizeof *detAnt_03_fields); double (*detAnt_04_fields)[5] = calloc(NX, sizeof *detAnt_04_fields); double (*detAnt_05_fields)[5] = calloc(NX, sizeof *detAnt_05_fields); double (*detAnt_06_fields)[5] = calloc(NX, sizeof *detAnt_06_fields); double (*detAnt_07_fields)[5] = calloc(NX, sizeof *detAnt_07_fields); #endif // reading input parameter // used for checking if input parameter was provided angle_zx_set = false; angle_zy_set = false; // default values to be used if input parameter are not set antAngle_zx = 0; antAngle_zy = 0; // loop through input parameter printf( "number of input parameters provided during call: %d\n", argc-1 ); while ( (opt_ret = getopt(argc, argv, "a:b:")) != -1 ){ switch (opt_ret) { // angle between z=const plane and x=const plane case 'a': antAngle_zx = atof(optarg); angle_zx_set = true; break; case 'b': antAngle_zy = atof(optarg); angle_zy_set = true; break; } } if ( argc > 1 ) { printf( "following parameters were set during call: \n" ); if (angle_zx_set) printf( " antAngle_zx = %f\n", antAngle_zx ); if (angle_zy_set) printf( " antAngle_zy = %f\n", antAngle_zy ); } ant_x = round(d_absorb + 18.68*period);//NX/2; ant_y = NY/2;//d_absorb + 6*period;//NY/2; ant_z = d_absorb + 4; // positions have to be even numbers, to ensure fields are accessed correctly if ((ant_x % 2) != 0) ++ant_x; if ((ant_y % 2) != 0) ++ant_y; if ((ant_z % 2) != 0) ++ant_z; ant_w0x = 4; ant_w0y = 4; pwr_dect = d_absorb; #ifdef DETECTOR_ANTENNA_1D detAnt_01_ypos = ant_y; detAnt_01_zpos = ant_z+2; detAnt_02_zpos = round(ant_z+2 + 1*4.67*period); // steps of 5 cm for 28 GHz detAnt_03_zpos = round(ant_z+2 + 2*4.67*period); detAnt_04_zpos = round(ant_z+2 + 3*4.67*period); detAnt_05_zpos = round(ant_z+2 + 4*4.67*period); detAnt_06_zpos = 
round(ant_z+2 + 5*4.67*period); detAnt_07_zpos = round(ant_z+2 + 6*4.67*period); // positions have to be even numbers, to ensure fields are accessed correctly if ((detAnt_01_ypos % 2) != 0) ++detAnt_01_ypos; if ((detAnt_01_zpos % 2) != 0) ++detAnt_01_zpos; if ((detAnt_02_zpos % 2) != 0) ++detAnt_02_zpos; if ((detAnt_03_zpos % 2) != 0) ++detAnt_03_zpos; if ((detAnt_04_zpos % 2) != 0) ++detAnt_04_zpos; if ((detAnt_05_zpos % 2) != 0) ++detAnt_05_zpos; if ((detAnt_06_zpos % 2) != 0) ++detAnt_06_zpos; if ((detAnt_07_zpos % 2) != 0) ++detAnt_07_zpos; // issue a warning when detector antenna position is beyond Nz if (detAnt_07_zpos > (NZ - d_absorb)) { printf( "ERROR: check the detector antenna positions into z direction\n" ); printf( " NZ-d_absorb = %d, detAnt_07_zpos = %d", NZ-d_absorb, detAnt_07_zpos ); } #endif // dt/dx = 0.5 is commenly used in 2D FDTD codes // Note that period refers to the wavelength in the numerical grid and not // in the "physical" grid (where one grid cell is equal to one Yee cell). // This means that in the physical grid, the wavelength is period/2, thus // in the equations we have to use period/2 for the wavelength. 
dx = 1./(period/2); dt = 1./(2.*(period/2)); #if BOUNDARY == 1 eco = 10./(double)(period); #endif T_wave = 0; omega_t = .0; // the arrays are initialized with calloc() and thus don't require zeroing printf( "starting to set all variables to 0...\n" ); power_abs_x1 = .0; power_abs_x2 = .0; power_abs_y1 = .0; power_abs_y2 = .0; power_abs_z1 = .0; power_abs_z2 = .0; power_abs_ref = 1e-7; poynt_x1 = .0; poynt_x2 = .0; poynt_y1 = .0; poynt_y2 = .0; poynt_z1 = .0; poynt_z1_ref = .0; poynt_z2 = .0; /* power_EE_x1 = .0; power_EE_x2 = .0; power_EE_y1 = .0; power_EE_y2 = .0; power_EE_z1 = .0; power_EE_z2 = .0; power_EE_ref = .0; */ printf( "...done setting all variables to 0\n" ); printf( "starting do define antenna field...\n" ); make_antenna_profile_4( 1, antAngle_zy, antAngle_zx, ant_w0x, ant_w0y, -(298.87)*.0, // .2/l_0*period = -298.87 ant_x, ant_y, ant_z, NX, NY, period, antField_xy, antPhaseTerms ); printf( "...done defining antenna field\n" ); printf( "starting defining background plasma density\n" ); make_density_profile( // ne_profile: 1 = plasma mirror // 2 = linearly increasing profile 2, period, d_absorb, // cntrl_para: ne_profile=1 --> 0: plane mirror; oblique mirror: -.36397; 20 degrees: -.17633 // ne_profile=2 --> k0*Ln: 25 25, NX, NY, NZ, n_e ); printf( " ...setting density in absorber to 0...\n "); //set_densityInAbsorber_v2( period, d_absorb, "z1", NX, NY, NZ, n_e ); //set_densityInAbsorber_v2( period, d_absorb, "x1x2y1y2z1", NX, NY, NZ, n_e ); printf( "...done defining background plasma density\n" ); printf( "starting defining background magnetic field...\n" ); // B0x: even-odd-odd // B0y: odd-even-odd // B0z: odd-odd-even make_B0_profile( // B0_profile: 1 = constant field 1, period, d_absorb, // cntrl_para: B0_profile=1 --> value of Y .85, NX, NY, NZ, J_B0 ); printf( "...done defining background magnetic field\n" ); // print some info to console printf( "Nx = %d, Ny = %d, Nz = %d\n", NX, NY, NZ ); printf( "period = %d\n", (int)(period) ); printf( 
"d_absorb = %d\n", d_absorb ); printf( "t_end = %d\n", (int)(t_end) ); printf( "antAngle_zx = %.2f, antAngle_zy = %.2f\n", antAngle_zx, antAngle_zy ); printf( "ant_w0x = %.2f, ant_w0y = %.2f\n", ant_w0x, ant_w0y ); printf( "ant_x = %d, ant_y = %d, ant_z = %d\n", ant_x, ant_y, ant_z ); printf( "Boundary condition set to '%d'\n", BOUNDARY ); #ifdef DETECTOR_ANTENNA_1D printf( "detector antenna positions: z1 = %d, y1 = %d\n", detAnt_01_zpos, detAnt_01_ypos ); printf( "detector antenna positions: z2 = %d, y1 = %d\n", detAnt_02_zpos, detAnt_01_ypos ); printf( "detector antenna positions: z3 = %d, y1 = %d\n", detAnt_03_zpos, detAnt_01_ypos ); printf( "detector antenna positions: z4 = %d, y1 = %d\n", detAnt_04_zpos, detAnt_01_ypos ); printf( "detector antenna positions: z5 = %d, y1 = %d\n", detAnt_05_zpos, detAnt_01_ypos ); printf( "detector antenna positions: z6 = %d, y1 = %d\n", detAnt_06_zpos, detAnt_01_ypos ); printf( "detector antenna positions: z7 = %d, y1 = %d\n", detAnt_07_zpos, detAnt_01_ypos ); #endif #ifdef _OPENMP #pragma omp parallel private(n_threads) { n_threads = omp_get_num_threads(); printf( "number of threads that will be used (OpenMP) = %d\n", n_threads ); } #endif for (t_int=0 ; t_int <=t_end ; ++t_int) { omega_t += 2.*M_PI/period; // to avoid precision problems when a lot of pi's are summed up if (omega_t >= 2.*M_PI) { omega_t += -2.*M_PI; T_wave += 1; //printf("status: number of oscillation periods: %d (t_int= %d) \n",T_wave,t_int); } // add source add_source( 3, //add_source( 1, NX, NY, NZ, period, ant_z, t_int, omega_t, antField_xy, antPhaseTerms, EB_WAVE ); add_source( 3, //add_source( 1, NX, NY, NZ_ref, period, ant_z, t_int, omega_t, antField_xy, antPhaseTerms, EB_WAVE_ref ); // apply absorbers #if BOUNDARY == 1 apply_absorber( NX, NY, NZ, d_absorb, eco, EB_WAVE ); apply_absorber( NX, NY, NZ_ref, d_absorb, eco, EB_WAVE_ref ); #endif // advance J // Jx: odd-even-even // Jy: even-odd-even // Jz: even-even-odd // B0x: even-odd-odd // B0y: 
odd-even-odd
        // B0z: odd-odd-even

        // advance the plasma current density J on the main grid only;
        // the reference field EB_WAVE_ref models vacuum and carries no J
        advance_J( NX, NY, NZ, EB_WAVE, J_B0, n_e, dt );

        // advance B on the main grid and on the vacuum reference grid
        advance_B( NX, NY, NZ, EB_WAVE, dx, dt );
        advance_B( NX, NY, NZ_ref, EB_WAVE_ref, dx, dt );

        // advance E; the reference grid uses the vacuum update (no J source term)
        advance_E( NX, NY, NZ, EB_WAVE, J_B0, dx, dt );
        advance_E_vacuum( NX, NY, NZ_ref, EB_WAVE_ref, dx, dt );

        // apply Mur's boundary conditions
#if BOUNDARY == 2
        abs_Mur_1st_v2( NX, NY, NZ, dt, dx, "x1x2y1y2z1z2",
                        EB_WAVE, E_Xdir_OLD, E_Ydir_OLD, E_Zdir_OLD );
        abs_Mur_1st( NX, NY, NZ_ref, dt, dx,
                     EB_WAVE_ref, E_Xdir_OLD_ref, E_Ydir_OLD_ref, E_Zdir_OLD_ref );
        // save this step's boundary E-field values; the Mur update of the
        // next iteration needs them as the "old" fields
        abc_Mur_saveOldE_xdir( NX, NY, NZ, EB_WAVE, E_Xdir_OLD );
        abc_Mur_saveOldE_ydir( NX, NY, NZ, EB_WAVE, E_Ydir_OLD );
        abc_Mur_saveOldE_zdir( NX, NY, NZ, EB_WAVE, E_Zdir_OLD );
        abc_Mur_saveOldE_xdir( NX, NY, NZ_ref, EB_WAVE_ref, E_Xdir_OLD_ref );
        abc_Mur_saveOldE_ydir( NX, NY, NZ_ref, EB_WAVE_ref, E_Ydir_OLD_ref );
        abc_Mur_saveOldE_zdir( NX, NY, NZ_ref, EB_WAVE_ref, E_Zdir_OLD_ref );
#endif

#ifdef DETECTOR_ANTENNA_1D
        // store wavefields for detector antennas over the final 10
        // oscillation periods, it was found previously that only one period
        // does not result in a too nice average
        if ( t_int >= (t_end-10*period) ) {
            detAnt1D_storeValues( NX, NY, NZ, detAnt_01_ypos, detAnt_01_zpos,
                                  t_int, period, EB_WAVE, detAnt_01_fields );
            detAnt1D_storeValues( NX, NY, NZ, detAnt_01_ypos, detAnt_02_zpos,
                                  t_int, period, EB_WAVE, detAnt_02_fields );
            detAnt1D_storeValues( NX, NY, NZ, detAnt_01_ypos, detAnt_03_zpos,
                                  t_int, period, EB_WAVE, detAnt_03_fields );
            detAnt1D_storeValues( NX, NY, NZ, detAnt_01_ypos, detAnt_04_zpos,
                                  t_int, period, EB_WAVE, detAnt_04_fields );
            detAnt1D_storeValues( NX, NY, NZ, detAnt_01_ypos, detAnt_05_zpos,
                                  t_int, period, EB_WAVE, detAnt_05_fields );
            detAnt1D_storeValues( NX, NY, NZ, detAnt_01_ypos, detAnt_06_zpos,
                                  t_int, period, EB_WAVE, detAnt_06_fields );
            detAnt1D_storeValues( NX, NY, NZ, detAnt_01_ypos, detAnt_07_zpos,
                                  t_int, period, EB_WAVE, detAnt_07_fields );
        }
#endif

        // IQ detector for power detection
        if
( t_int >= 20*period ) { // z1-plane and z2-plane poynt_z1_ref = calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect, "ref_z1", EB_WAVE, EB_WAVE_ref ); poynt_z1 = calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect, "z1", EB_WAVE, EB_WAVE_ref ); //poynt_z1_ref = ( calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect , "ref_z1", EB_WAVE, EB_WAVE_ref ) // +calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect+2, "ref_z1", EB_WAVE, EB_WAVE_ref ) )*.5; //poynt_z1 = ( calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect, "z1", EB_WAVE, EB_WAVE_ref ) // +calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect+2, "z1", EB_WAVE, EB_WAVE_ref ) )*.5; poynt_z2 = calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect, "z2", EB_WAVE, EB_WAVE_ref ); // x1-plane and x2-plane //poynt_x1 = calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect, "x1", EB_WAVE, EB_WAVE_ref ); //poynt_x2 = calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect, "x2", EB_WAVE, EB_WAVE_ref ); // y1-plane and y2-plane //poynt_y1 = calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect, "y1", EB_WAVE, EB_WAVE_ref ); //poynt_y2 = calc_poynt_4( NX, NY, NZ, NZ_ref, pwr_dect, "y2", EB_WAVE, EB_WAVE_ref ); // printf( "t = %d, power_abs_ref = %13.5e, power_abs_z1 = %13.5e, power_abs_z2 = %13.5e, poynt_z1 = %13.5e, poynt_z2 = %13.5e\n", // t_int, power_abs_ref, power_abs_z1, power_abs_z2, poynt_z1, poynt_z2 ); power_abs_ref = .99*power_abs_ref + .01*poynt_z1_ref; power_abs_z1 = .99*power_abs_z1 + .01*poynt_z1; power_abs_z2 = .99*power_abs_z2 + .01*poynt_z2; power_abs_x1 = .99*power_abs_x1 + .01*poynt_x1; power_abs_x2 = .99*power_abs_x2 + .01*poynt_x2; power_abs_y1 = .99*power_abs_y1 + .01*poynt_y1; power_abs_y2 = .99*power_abs_y2 + .01*poynt_y2; /* // EE // z1-plane and z2-plane power_EE_ref += calc_power_EE_1( NX, NY, NZ, NZ_ref, d_absorb, "ref_z1", EB_WAVE, EB_WAVE_ref ); power_EE_z1 += calc_power_EE_1( NX, NY, NZ, NZ_ref, d_absorb, "z1", EB_WAVE, EB_WAVE_ref ); power_EE_z2 += calc_power_EE_1( NX, NY, NZ, NZ_ref, d_absorb, "z2", EB_WAVE, EB_WAVE_ref ); // x1-plane and x2-plane power_EE_x1 += calc_power_EE_1( NX, NY, 
NZ, NZ_ref, d_absorb, "x1", EB_WAVE, EB_WAVE_ref ); power_EE_x2 += calc_power_EE_1( NX, NY, NZ, NZ_ref, d_absorb, "x2", EB_WAVE, EB_WAVE_ref ); // y1-plane and y2-plane power_EE_y1 += calc_power_EE_1( NX, NY, NZ, NZ_ref, d_absorb, "y1", EB_WAVE, EB_WAVE_ref ); power_EE_y2 += calc_power_EE_1( NX, NY, NZ, NZ_ref, d_absorb, "y2", EB_WAVE, EB_WAVE_ref ); */ } if ( (t_int % (int)(period)) == 4 ) { printf( "status: number of oscillation periods: %d (t_int= %d) \n",T_wave,t_int); printf( " Poynting-power: z1 = %13.6e, z2 = %13.6e, x1 = %13.6e, x2 = %13.6e, y1 = %13.6e, y2 = %13.6e, (z1+z2+x1+x2+y1+y2)/z1_ref = %13.6e %%\n", power_abs_z1/power_abs_ref, power_abs_z2/power_abs_ref, power_abs_x1/power_abs_ref, power_abs_x2/power_abs_ref, power_abs_y1/power_abs_ref, power_abs_y2/power_abs_ref, (power_abs_x1+power_abs_x2 + power_abs_y1+power_abs_y2 + power_abs_z1+power_abs_z2)/power_abs_ref * 100. ); /* printf( " Power_EE_d-abs: z1 = %13.6e, z2 = %13.6e, x1 = %13.6e, x2 = %13.6e, y1 = %13.6e, y2 = %13.6e, ref = %13.6e\n", power_EE_z1, power_EE_z2, power_EE_x1, power_EE_x2, power_EE_y1, power_EE_y2, power_EE_ref // (power_abs_x1+power_abs_x2 + power_abs_y1+power_abs_y2 + power_abs_z1+power_abs_z2)/power_abs_ref * 100. 
); */ timetraces[T_wave][0] = (double)t_int; timetraces[T_wave][1] = (double)T_wave; timetraces[T_wave][2] = power_abs_z1/power_abs_ref; timetraces[T_wave][3] = power_abs_z2/power_abs_ref; timetraces[T_wave][4] = power_abs_x1/power_abs_ref; timetraces[T_wave][5] = power_abs_x2/power_abs_ref; timetraces[T_wave][6] = power_abs_y1/power_abs_ref; timetraces[T_wave][7] = power_abs_y2/power_abs_ref; } } // end of time loop printf( "-------------------------------------------------------------------------------------------------------------\n" ); printf( " T | poynt_z1 | poynt_z2 | poynt_x1 | poynt_x2 | poynt_y1 | poynt_y2 | P_out \n" ); printf( "------+--------------+--------------+--------------+--------------+--------------+--------------+------------\n" ); for ( ii=0 ; ii<(t_end/(int)period) ; ++ii ) printf( " %4d |%13.6e |%13.6e |%13.6e |%13.6e |%13.6e |%13.6e |%13.6e\n", (int)timetraces[ii][1], //timetraces[ii][1], timetraces[ii][2], timetraces[ii][3], timetraces[ii][4], timetraces[ii][5], timetraces[ii][6], timetraces[ii][7], (timetraces[ii][2]+timetraces[ii][3] + timetraces[ii][4]+timetraces[ii][5] + timetraces[ii][6]+timetraces[ii][7]) ); printf( "-------------------------------------------------------------------------------------------------------------\n" ); // write timetrace data into file // open file in w(rite) mode; might consider using a+ instead writeTimetraces2ascii( (t_end/(int)period), 8, t_end, period, "timetraces2.dat", timetraces ); // save into hdf5 // abs(E) // prepare array for that #pragma omp parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=0 ; ii<NX ; ii+=2) { for (jj=0 ; jj<NY ; jj+=2) { for (kk=0 ; kk<NZ ; kk+=2) { data2save[(ii/2)][(jj/2)][(kk/2)] = sqrt( pow(EB_WAVE[ii+1][jj ][kk ],2) +pow(EB_WAVE[ii ][jj+1][kk ],2) +pow(EB_WAVE[ii ][jj ][kk+1],2) ); } } } len_str = snprintf( filename_hdf5, sizeof(filename_hdf5), "fileout.h5"); if ( (len_str < 0) || (len_str >= sizeof(filename_hdf5)) ) { printf( "ERROR: could not write 
filename_hdf5 string\n" ); // use a proper error handler here } else { sprintf( dSet_name, "E_abs__tint%05d", t_int ); printf( "status of writeMyHDF_v4: %d\n", writeMyHDF_v4( NX/2, NY/2, NZ/2, filename_hdf5, dSet_name, data2save) ) ; } // density sprintf( dSet_name, "n_e" ); printf( "status of writeMyHDF_v4: %d\n", writeMyHDF_v4( NX/2, NY/2, NZ/2, filename_hdf5, dSet_name, n_e) ) ; set2zero_3D( NX/2, NY/2, NZ/2, data2save ); // background magnetic field // B0x: even-odd-odd #pragma omp parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=0 ; ii<NX ; ii+=2) { for (jj=0 ; jj<NY ; jj+=2) { for (kk=0 ; kk<NZ ; kk+=2) { data2save[(ii/2)][(jj/2)][(kk/2)] = J_B0[ii ][jj+1][kk+1]; } } } printf( "status of writeMyHDF_v4: %d\n", writeMyHDF_v4( NX/2, NY/2, NZ/2, filename_hdf5, "B0x", data2save) ) ; set2zero_3D( NX/2, NY/2, NZ/2, data2save ); // B0y: odd-even-odd #pragma omp parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=0 ; ii<NX ; ii+=2) { for (jj=0 ; jj<NY ; jj+=2) { for (kk=0 ; kk<NZ ; kk+=2) { data2save[(ii/2)][(jj/2)][(kk/2)] = J_B0[ii+1][jj ][kk+1]; } } } printf( "status of writeMyHDF_v4: %d\n", writeMyHDF_v4( NX/2, NY/2, NZ/2, filename_hdf5, "B0y", data2save) ) ; set2zero_3D( NX/2, NY/2, NZ/2, data2save ); // B0z: odd-odd-even #pragma omp parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=0 ; ii<NX ; ii+=2) { for (jj=0 ; jj<NY ; jj+=2) { for (kk=0 ; kk<NZ ; kk+=2) { data2save[(ii/2)][(jj/2)][(kk/2)] = J_B0[ii+1][jj+1][kk ]; } } } printf( "status of writeMyHDF_v4: %d\n", writeMyHDF_v4( NX/2, NY/2, NZ/2, filename_hdf5, "B0z", data2save) ) ; set2zero_3D( NX/2, NY/2, NZ/2, data2save ); writeConfig2HDF( filename_hdf5, NX, NY, NZ, period, d_absorb ); #if defined(HDF5) && defined(DETECTOR_ANTENNA_1D) detAnt1D_write2hdf5( NX, filename_hdf5, "/detAnt_01" , detAnt_01_ypos, detAnt_01_zpos, detAnt_01_fields ); detAnt1D_write2hdf5( NX, filename_hdf5, "/detAnt_02" , detAnt_01_ypos, detAnt_02_zpos, detAnt_02_fields ); 
detAnt1D_write2hdf5( NX, filename_hdf5, "/detAnt_03" , detAnt_01_ypos, detAnt_03_zpos, detAnt_03_fields ); detAnt1D_write2hdf5( NX, filename_hdf5, "/detAnt_04" , detAnt_01_ypos, detAnt_04_zpos, detAnt_04_fields ); detAnt1D_write2hdf5( NX, filename_hdf5, "/detAnt_05" , detAnt_01_ypos, detAnt_05_zpos, detAnt_05_fields ); detAnt1D_write2hdf5( NX, filename_hdf5, "/detAnt_06" , detAnt_01_ypos, detAnt_06_zpos, detAnt_06_fields ); detAnt1D_write2hdf5( NX, filename_hdf5, "/detAnt_07" , detAnt_01_ypos, detAnt_07_zpos, detAnt_07_fields ); #endif free( EB_WAVE ); free( J_B0 ); free( n_e ); free( data2save ); return EXIT_SUCCESS; }//}}} int make_antenna_profile_4( int exc_signal, double antAngle_zy, double antAngle_zx, double ant_w0x, double ant_w0y, double z2waist, int ant_x, int ant_y, int ant_z, size_t N_x, size_t N_y, double period, double antField_xy[N_x/2][N_y/2], double antPhaseTerms[N_x/2][N_y/2] ) { //{{{ // like make_antenna_profile_3 but with previously missing optional for z2waist // i.e. allowing now for converging beams with their waist not in the antenna plane int ii,jj; double antBeam_z_x, antBeam_z_y, antBeam_r_x, antBeam_r_y, antBeam_wx, antBeam_wy, antPhase_x, antPhase_y, antPhaseCurve_xR, antPhaseCurve_yR, antPhaseCurve_x, antPhaseCurve_y, antPhaseGouy_x, antPhaseGouy_y; for (ii=0 ; ii<(N_x/2) ; ++ii) { // beam coordinate system antBeam_r_x = ((double)ii-(double)ant_x/2.) * cos(antAngle_zx/180.*M_PI); antBeam_z_x = ((double)ii-(double)ant_x/2.) * sin(antAngle_zx/180.*M_PI) * cos(antAngle_zy/180.*M_PI) + z2waist/2; // account for tilted Gauss beam // w(z)=w0*sqrt(1+(lambda*z/pi*w0^2)^2) antBeam_wx = ant_w0x*(period/2.) * sqrt( 1. 
+ pow( (period/2)*antBeam_z_x/( M_PI*pow(ant_w0x*(period/2), 2) ) , 2) ); // phase variation along beam in atenna plane antPhase_x = antBeam_z_x * 2.*M_PI/(period/2.); // phase variation due to curvature of phase fronts // radius of curvature of phasefronts: R(z)=z+1/z*(pi*w0^2/lambda)^2 antPhaseCurve_xR = antBeam_z_x + 1./(antBeam_z_x + 1e-5) *pow( M_PI * pow(ant_w0x*period/2., 2) / (period/2) , 2 ); antPhaseCurve_x = pow(antBeam_r_x,2) / (2.*antPhaseCurve_xR) * 2.*M_PI/(period/2); for (jj=0 ; jj<(N_y/2) ; ++jj) { // beam coordinate system antBeam_r_y = ((double)jj-(double)ant_y/2.) * cos(antAngle_zy/180.*M_PI); antBeam_z_y = ((double)jj-(double)ant_y/2.) * sin(antAngle_zy/180.*M_PI) * cos(antAngle_zx/180.*M_PI) + z2waist/2; // account for tilted Gauss beam // w(z)=w0*sqrt(1+(lambda*z/pi*w0^2)^2) antBeam_wy = ant_w0y*(period/2.) * sqrt( 1. + pow( (period/2.)*antBeam_z_y/( M_PI*pow(ant_w0y*(period/2.), 2) ) , 2) ); // envelope of antenna field antField_xy[ii][jj] = exp( -1.*pow(antBeam_r_x/antBeam_wx, 2) ) *exp( -1.*pow(antBeam_r_y/antBeam_wy, 2) ); // factor: w0/w(z) antField_xy[ii][jj] *= ant_w0x*(period/2)/antBeam_wx * ant_w0y*(period/2)/antBeam_wy; // phase variation along beam in atenna plane antPhase_y = antBeam_z_y * 2.*M_PI/(period/2.); // phase variation due to curvature of phase fronts // radius of curvature of phasefronts: R(z)=z+1/z*(pi*w0^2/lambda)^2 antPhaseCurve_yR = antBeam_z_y + 1./(antBeam_z_y + 1e-5) *pow( M_PI * pow(ant_w0y*period/2., 2) / (period/2.) 
, 2 ); antPhaseCurve_y = pow(antBeam_r_y,2) / (2.*antPhaseCurve_yR) * 2.*M_PI/(period/2.); // account for the Gouy-phase // phase_Gouy = arctan(z/z_R) // with z_R = pi*w_0^2/lambda the Rayleigh range //GouyPhase_beam = atan( period * z_beam / (M_PI * pow(ant_w0x*period, 2)) ); antPhaseGouy_x = atan( period/2.*antBeam_z_x / (M_PI * pow(ant_w0x*period/2., 2) ) ); antPhaseGouy_y = atan( period/2.*antBeam_z_y / (M_PI * pow(ant_w0y*period/2., 2) ) ); //ant_phase = .0; <<--- extra phase-term antPhaseTerms[ii][jj] = -antPhase_x -antPhase_y -antPhaseCurve_x -antPhaseCurve_y -antPhaseGouy_x -antPhaseGouy_y; } } return EXIT_SUCCESS; }//}}} int make_density_profile( int ne_profile, double period, int d_absorb, double cntrl_para, size_t N_x, size_t N_y, size_t N_z, double n_e[N_x/2][N_y/2][N_z/2] ) { //{{{ size_t ii, jj, kk, ne_start_z; double ne_max, ne_k0Ln, aux; // if density is larger than this value, FDTD code becomes instable ne_max = period * 2./5.; if ( ne_profile == 1 ) { // plasma mirror for (ii=0 ; ii<(N_x/2) ; ++ii) { for (jj=0 ; jj<(N_y/2) ; ++jj) { //for (kk=((N_z-d_absorb-4)/2) ; kk<(N_z/2) ; ++kk) { for (kk=0 ; kk<(N_z/2) ; ++kk) { // z = m*y + b = delta_z/delta_y * y + z_start // = cntrl_para * y + z_start if ( (double)kk > (cntrl_para*(double)jj + ((double)N_z-(double)d_absorb-4.)/2.) 
) { n_e[ii][jj][kk] = ne_max; } } } } } else if ( ne_profile == 2 ) { // linearly increasing profile with k0Ln as slope // n_e(z) = m*z // with m = 2*pi/(k0Ln*lambda) // z = z-starting_position //ne_start_z = (d_absorb + period)/2; ne_start_z = (d_absorb + 224.155)/2; // benchmark scenario from STEP project: .15m/l_0*period if (ne_start_z%2 != 0) ne_start_z += 1; ne_k0Ln = cntrl_para; printf( "make_density_profile: ne_profile = %d, ne_start_z = %ld, k0Ln = %f\n", ne_profile, ne_start_z, ne_k0Ln ); for (ii=0 ; ii<(N_x/2) ; ++ii) { for (jj=0 ; jj<(N_y/2) ; ++jj) { for (kk=0 ; kk<(N_z/2) ; ++kk) { aux = ((double)kk - (double)ne_start_z) * (2.*M_PI / (ne_k0Ln*period)); // negative density values are unphysical if (aux < .0) aux = .0; // FDTD full-wave codes only allow for a maximum density value if (aux > ne_max) { aux = ne_max; printf( " maximum density achieved (ii, jj, kk = %ld, %ld, %ld): %f\n", ii, jj, kk, aux ); } //if ((ii%10 == 0) && (jj%10 == 0) && (kk%10 == 0)) //if (kk%10 == 0) // printf( " n_e[%4ld][%4ld][%4ld] = %f\n", ii, jj, kk, aux ); n_e[ii][jj][kk] = aux; } } } } else if ( ne_profile == 3 ) { // plasma cylinder (2D gauss) in center of grid // // (y-y0)^2 (z-z0)^2 // n_e(y,z) = n_e,max * exp( -( ------------ + ------------- ) ) // 2*sigma_y^2 2*sigma_z^2 // for (ii=0 ; ii<(N_x/2) ; ++ii) { for (jj=0 ; jj<(N_y/2) ; ++jj) { for (kk=0 ; kk<(N_z/2) ; ++kk) { n_e[ii][jj][kk] = exp( -1.* ( pow((double)jj-(double)N_y/4., 2)/(2*pow(period/2.,2)) +pow((double)kk-(double)N_z/4., 2)/(2*pow(period/2.,2)) )) * 5.; } } } } else if ( ne_profile == 4 ) { // same as ne_profile = 3, but plasma cylinder is now along y for (ii=0 ; ii<(N_x/2) ; ++ii) { for (jj=0 ; jj<(N_y/2) ; ++jj) { for (kk=0 ; kk<(N_z/2) ; ++kk) { n_e[ii][jj][kk] = exp( -1.* ( pow((double)ii-(double)N_x/4., 2)/(2*pow(period/2.,2)) +pow((double)kk-(double)N_z/4., 2)/(2*pow(period/2.,2)) )) * 2;//5.; } } } } return EXIT_SUCCESS; }//}}} int set_densityInAbsorber_v2( double period, int d_absorb, char 
absorber[], size_t N_x, size_t N_y, size_t N_z, double n_e[N_x/2][N_y/2][N_z/2] ) { //{{{ double x, y, z, x0,x1, y0,y1, z0,z1, ne_absorb, // electron density in absorber smooth, // 1: relatively steep, .2: more smooth ne_dist, // proportional to distance to absorber, where n_e starts to decrease scale_fact; ne_absorb = .0; smooth = .5;//.2; ne_dist = round( period/1 ); x0 = (double)d_absorb + ne_dist; x1 = (double)N_x - (d_absorb + ne_dist); y0 = (double)d_absorb + ne_dist; y1 = (double)N_y - (d_absorb + ne_dist); z0 = (double)d_absorb + ne_dist; z1 = (double)N_z - (d_absorb + ne_dist); // scale to density grid which is only half the size of FDTD-wavefields grid // since 2 numerical grid points correspond to one "physical" grid point // and we want not to vary the background parameters within one physical // grid point x0 = round(x0*.5); x1 = round(x1*.5); y0 = round(y0*.5); y1 = round(y1*.5); z0 = round(z0*.5); z1 = round(z1*.5); // the string "absorber" is used to set in which absorber n_e will be modified // the comparison is done with the strstr() function, which return the address // of the substring if found, NULL otherwise // set density in x0 absorber if ( strstr(absorber,"x1") ) { for ( x=0; x<(N_x/2) ; ++x ) { scale_fact = +.5*( tanh(smooth*(x-x0)) + 1); // x0 boundary //printf( "x1: x=%.1f, scale_fact=%f\n", x, scale_fact) ; for ( y=0. ; y<(N_y/2) ; ++y ) { for (z=0 ; z<(N_z/2) ; ++z) { n_e[(int)x][(int)y][(int)z] *= scale_fact; } } } } // set density in x1 absorber if ( strstr(absorber,"x2") ) { for ( x=0; x<(N_x/2) ; ++x ) { scale_fact = +.5*(-1.*tanh(smooth*(x-x1)) + 1); // x1 boundary //printf( "x2: x=%.1f, scale_fact=%f\n", x, scale_fact) ; for ( y=0. 
; y<(N_y/2) ; ++y ) {
                for (z=0 ; z<(N_z/2) ; ++z) {
                    n_e[(int)x][(int)y][(int)z] *= scale_fact;
                }
            }
        }
    }
    // set density in y0 absorber
    if ( strstr(absorber,"y1") ) {
        for ( y=0; y<(N_y/2) ; ++y ) {
            // tanh ramps the density smoothly from 0 inside the absorber
            // up to its full value at ne_dist away from the absorber edge
            scale_fact = +.5*( tanh(smooth*(y-y0)) + 1);        // y0 boundary
            //printf( "y1: y=%.1f, scale_fact=%f\n", y, scale_fact) ;
            for ( x=0; x<(N_x/2) ; ++x ) {
                for (z=0 ; z<(N_z/2) ; ++z) {
                    n_e[(int)x][(int)y][(int)z] *= scale_fact;
                }
            }
        }
    }
    // set density in y1 absorber
    if ( strstr(absorber,"y2") ) {
        for ( y=0; y<(N_y/2) ; ++y ) {
            scale_fact = +.5*(-1.*tanh(smooth*(y-y1)) + 1);     // y1 boundary
            //printf( "y2: y=%.1f, scale_fact=%f\n", y, scale_fact) ;
            for ( x=0; x<(N_x/2) ; ++x ) {
                for (z=0 ; z<(N_z/2) ; ++z) {
                    n_e[(int)x][(int)y][(int)z] *= scale_fact;
                }
            }
        }
    }
    // set density in z0 absorber
    if ( strstr(absorber,"z1") ) {
        for ( z=0 ; z<(N_z/2) ; ++z) {
            scale_fact = +.5*( tanh(smooth*(z-z0)) + 1);        // z0 boundary
            //printf( "z1: z=%.1f, scale_fact=%f\n", z, scale_fact) ;
            for ( x=0; x<(N_x/2) ; ++x ) {
                for ( y=0; y<(N_y/2) ; ++y ) {
                    n_e[(int)x][(int)y][(int)z] *= scale_fact;
                }
            }
        }
    }
    // set density in z1 absorber
    if ( strstr(absorber,"z2") ) {
        for ( z=0 ; z<(N_z/2) ; ++z) {
            scale_fact = +.5*(-1.*tanh(smooth*(z-z1)) + 1);     // z1 boundary
            //printf( "z2: z=%.1f, scale_fact=%f\n", z, scale_fact) ;
            for ( x=0; x<(N_x/2) ; ++x ) {
                for ( y=0; y<(N_y/2) ; ++y ) {
                    n_e[(int)x][(int)y][(int)z] *= scale_fact;
                }
            }
        }
    }
    return EXIT_SUCCESS;
} //}}}
// Initialize the background magnetic field stored in the combined J_B0 array.
// B0_profile:  1 --> spatially constant field
// cntrl_para:  for B0_profile=1 the field magnitude; it is written into the
//              x-component only, y- and z-components are set to 0
// period, d_absorb: part of the common profile-function signature
//              (not used for B0_profile=1 -- TODO confirm intended for future profiles)
int make_B0_profile( int B0_profile,
                     double period, int d_absorb, double cntrl_para,
                     size_t N_x, size_t N_y, size_t N_z,
                     double J_B0[N_x][N_y][N_z] ) {
//{{{

    size_t
        ii, jj, kk;
//    double
//        aux;

    // B0 lives on the Yee-grid positions complementary to J:
    // B0x: even-odd-odd
    // B0y: odd-even-odd
    // B0z: odd-odd-even
    if ( B0_profile == 1 ) {
        // constant field
        for (ii=0 ; ii<N_x ; ii+=2) {
            for (jj=0 ; jj<N_y ; jj+=2) {
                for (kk=0 ; kk<N_z ; kk+=2) {
                    J_B0[ii  ][jj+1][kk+1] = cntrl_para;
                    J_B0[ii+1][jj  ][kk+1] = cntrl_para*.0;
                    J_B0[ii+1][jj+1][kk  ] = cntrl_para*.0;
                }
            }
        }
    }
    return EXIT_SUCCESS;
}//}}}
int add_source( int
exc_signal, size_t N_x, size_t N_y, size_t N_z, double period, int ant_z, int t_int, double omega_t, double antField_xy[N_x/2][N_y/2], double antPhaseTerms[N_x/2][N_y/2], double EB_WAVE[N_x][N_y][N_z] ) { //{{{ size_t ii, jj; double t_rise, source; if ( exc_signal == 1 ) { t_rise = 1. - exp( -1*pow( ((double)(t_int)/period), 2 )/100. ); #pragma omp parallel for collapse(2) default(shared) private(ii, jj, source) for ( ii=2 ; ii<N_x ; ii+=2 ) { for ( jj=2 ; jj<N_y ; jj+=2 ) { // note: for X-mode injection, switch cos and sin of source_1 and source_2 //source = sin(omega_t - aux - curve + GouyPhase_beam + ant_phase/180.*M_PI ) * t_rise * antField_xy[(ii/2)][(jj/2)] ; source = sin(omega_t + antPhaseTerms[(ii/2)][(jj/2)]) * t_rise * antField_xy[(ii/2)][(jj/2)] ; // Ex EB_WAVE[ii+1][jj ][ant_z] += source; } } } else if ( exc_signal == 2) { t_rise = 1. - exp( -1*pow( ((double)(t_int)/period), 2 )/100. ); #pragma omp parallel for collapse(2) default(shared) private(ii, jj, source) for ( ii=2 ; ii<N_x ; ii+=2 ) { for ( jj=2 ; jj<N_y ; jj+=2 ) { source = sin(omega_t + antPhaseTerms[(ii/2)][(jj/2)]) * t_rise * antField_xy[(ii/2)][(jj/2)] ; // Bx EB_WAVE[ii ][jj+1][ant_z+1] += source; } } } else if ( exc_signal == 3) { t_rise = 1. - exp( -1*pow( ((double)(t_int)/period), 2 )/100. ); #pragma omp parallel for collapse(2) default(shared) private(ii, jj, source) for ( ii=2 ; ii<N_x ; ii+=2 ) { for ( jj=2 ; jj<N_y ; jj+=2 ) { // note: for X-mode injection, switch cos and sin of source_1 and source_2 source = sin(omega_t + antPhaseTerms[(ii/2)][(jj/2)]) * t_rise * antField_xy[(ii/2)][(jj/2)] ; // Ex EB_WAVE[ii+1][jj ][ant_z] += source; source = sin(omega_t + antPhaseTerms[(ii/2)][(jj/2)] + M_PI/2.) 
* t_rise * antField_xy[(ii/2)][(jj/2)] ; // Bx EB_WAVE[ii ][jj+1][ant_z+1] += source*(1.41); } } } return EXIT_SUCCESS; }//}}} int apply_absorber( size_t N_x, size_t N_y, size_t N_z, int d_absorb, double eco, double EB_WAVE[N_x][N_y][N_z] ) { //{{{ size_t ii, jj, kk; double damp; // z1 absorber: z=0...d_absorb //#pragma omp parallel for collapse(2) default(shared) private(k,j,damp) <-- can collapse be used here? #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<d_absorb-2 ; kk+=2) { damp = ((double)kk-(double)d_absorb)/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; // if ((ii%10 == 0) && (jj%10 == 0) && (kk%10 == 0)) // printf( "z1: ii=%3d, jj=%3d, kk=%3d, (kk-d_abs)/d_abs=%f, damp=%f\n", // ii, jj, kk, ((double)kk-(double)d_absorb)/(double)d_absorb, damp ); } } } // z2 absorber: z=d_absorb...NZ #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=(N_z-d_absorb) ; kk<N_z-2 ; kk+=2) { //NZ-d_absorb-2 ??? 
damp = ((double)kk-((double)N_z-(double)d_absorb))/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } // x1 absorber: x=0...d_absorb #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { for (ii=2 ; ii<d_absorb-2 ; ii+=2) { damp = ((double)ii-(double)d_absorb)/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } // x2 absorber: x=d_absorb...NX #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { for (ii=(N_x-d_absorb) ; ii<N_x-2 ; ii+=2) { //NX-d_absorb-2 ??? damp = ((double)ii-((double)N_x-(double)d_absorb))/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } // y1 absorber: y=0...d_absorb #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { for (jj=2 ; jj<d_absorb-2 ; jj+=2) { damp = ((double)jj-(double)d_absorb)/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } // y2 absorber: y=d_absorb...NY #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { for (jj=(N_y-d_absorb) ; jj<N_y-2 ; jj+=2) { //NY-d_absorb-2 ??? 
damp = ((double)jj-((double)N_y-(double)d_absorb))/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } return EXIT_SUCCESS; }//}}} int apply_absorber_v2( size_t N_x, size_t N_y, size_t N_z, int d_absorb, double eco, char absorber[], double EB_WAVE[N_x][N_y][N_z] ) { //{{{ size_t ii, jj, kk; double damp; // z1 absorber: z=0...d_absorb //#pragma omp parallel for collapse(2) default(shared) private(k,j,damp) <-- can collapse be used here? if ( strstr(absorber,"z1") ) { #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<d_absorb-2 ; kk+=2) { damp = ((double)kk-(double)d_absorb)/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; // if ((ii%10 == 0) && (jj%10 == 0) && (kk%10 == 0)) // printf( "z1: ii=%3d, jj=%3d, kk=%3d, (kk-d_abs)/d_abs=%f, damp=%f\n", // ii, jj, kk, ((double)kk-(double)d_absorb)/(double)d_absorb, damp ); } } } } // z2 absorber: z=d_absorb...NZ if ( strstr(absorber,"z2") ) { #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=(N_z-d_absorb) ; kk<N_z-2 ; kk+=2) { //NZ-d_absorb-2 ??? 
damp = ((double)kk-((double)N_z-(double)d_absorb))/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } } // x1 absorber: x=0...d_absorb if ( strstr(absorber,"x1") ) { #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { for (ii=2 ; ii<d_absorb-2 ; ii+=2) { damp = ((double)ii-(double)d_absorb)/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } } if ( strstr(absorber,"x2") ) { // x2 absorber: x=d_absorb...NX #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { for (ii=(N_x-d_absorb) ; ii<N_x-2 ; ii+=2) { //NX-d_absorb-2 ??? damp = ((double)ii-((double)N_x-(double)d_absorb))/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } } // y1 absorber: y=0...d_absorb if ( strstr(absorber,"y1") ) { #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { for (jj=2 ; jj<d_absorb-2 ; jj+=2) { damp = ((double)jj-(double)d_absorb)/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } } // y2 absorber: y=d_absorb...NY if ( strstr(absorber,"y2") ) { #pragma omp parallel for default(shared) private(ii,jj,kk,damp) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { for (jj=(N_y-d_absorb) ; jj<N_y-2 ; jj+=2) { //NY-d_absorb-2 ??? 
damp = ((double)jj-((double)N_y-(double)d_absorb))/(double)d_absorb; damp = ABSORBER_DAMPING(eco,damp); EB_WAVE[ii+1][jj ][kk ] *= damp; EB_WAVE[ii ][jj+1][kk ] *= damp; EB_WAVE[ii ][jj ][kk+1] *= damp; } } } } return EXIT_SUCCESS; }//}}} int abc_Mur_saveOldE_xdir( size_t N_x, size_t N_y, size_t N_z, double EB_WAVE[N_x][N_y][N_z], double E_old[8][N_y][N_z] ) { //{{{ // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd size_t jj, kk, offset; offset = 2; #pragma omp parallel for collapse(2) default(shared) private(jj,kk) for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { // store values at x=0 and x=1 // Ex: odd-even-even E_old[0+1][jj ][kk ] = EB_WAVE[0+offset+1][jj ][kk ]; E_old[2+1][jj ][kk ] = EB_WAVE[2+offset+1][jj ][kk ]; // Ey: even-odd-even E_old[0 ][jj+1][kk ] = EB_WAVE[0+offset ][jj+1][kk ]; E_old[2 ][jj+1][kk ] = EB_WAVE[2+offset ][jj+1][kk ]; // Ez: even-even-odd E_old[0 ][jj ][kk+1] = EB_WAVE[0+offset ][jj ][kk+1]; E_old[2 ][jj ][kk+1] = EB_WAVE[2+offset ][jj ][kk+1]; // store values at x=Nx-1 and x=Nx-2 // Ex: odd-even-even E_old[4+1][jj ][kk ] = EB_WAVE[N_x-4-offset+1][jj ][kk ]; E_old[6+1][jj ][kk ] = EB_WAVE[N_x-2-offset+1][jj ][kk ]; // Ey: even-odd-even E_old[4 ][jj+1][kk ] = EB_WAVE[N_x-4-offset ][jj+1][kk ]; E_old[6 ][jj+1][kk ] = EB_WAVE[N_x-2-offset ][jj+1][kk ]; // Ez: even-even-odd E_old[4 ][jj ][kk+1] = EB_WAVE[N_x-4-offset ][jj ][kk+1]; E_old[6 ][jj ][kk+1] = EB_WAVE[N_x-2-offset ][jj ][kk+1]; } } return EXIT_SUCCESS; }//}}} int abc_Mur_saveOldE_ydir( size_t N_x, size_t N_y, size_t N_z, double EB_WAVE[N_x][N_y][N_z], double E_old[N_y][8][N_z] ) { //{{{ // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd size_t ii, kk, offset; offset = 2; #pragma omp parallel for collapse(2) default(shared) private(ii,kk) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { // store values at y=0 and y=1 // Ex: odd-even-even E_old[ii+1][0 ][kk ] = EB_WAVE[ii+1][0+offset ][kk ]; E_old[ii+1][2 ][kk ] = 
EB_WAVE[ii+1][2+offset ][kk ]; // Ey: even-odd-even E_old[ii ][0+1][kk ] = EB_WAVE[ii ][0+offset+1][kk ]; E_old[ii ][2+1][kk ] = EB_WAVE[ii ][2+offset+1][kk ]; // Ez: even-even-odd E_old[ii ][0 ][kk+1] = EB_WAVE[ii ][0+offset ][kk+1]; E_old[ii ][2 ][kk+1] = EB_WAVE[ii ][2+offset ][kk+1]; // store values at x=Nx-1 and x=Nx-2 // Ex: odd-even-even E_old[ii+1][4 ][kk ] = EB_WAVE[ii+1][N_y-4-offset ][kk ]; E_old[ii+1][6 ][kk ] = EB_WAVE[ii+1][N_y-2-offset ][kk ]; // Ey: even-odd-even E_old[ii ][4+1][kk ] = EB_WAVE[ii ][N_y-4-offset+1][kk ]; E_old[ii ][6+1][kk ] = EB_WAVE[ii ][N_y-2-offset+1][kk ]; // Ez: even-even-odd E_old[ii ][4 ][kk+1] = EB_WAVE[ii ][N_y-4-offset ][kk+1]; E_old[ii ][6 ][kk+1] = EB_WAVE[ii ][N_y-2-offset ][kk+1]; } } return EXIT_SUCCESS; }//}}} int abc_Mur_saveOldE_zdir( size_t N_x, size_t N_y, size_t N_z, double EB_WAVE[N_x][N_y][N_z], double E_old[N_x][N_y][8] ) { //{{{ // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd size_t ii, jj, offset; offset = 2; #pragma omp parallel for collapse(2) default(shared) private(ii,jj) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (jj=2 ; jj<N_y-2 ; jj+=2) { // store values at z=0 and z=1 // Ex: odd-even-even E_old[ii+1][jj ][0 ] = EB_WAVE[ii+1][jj ][0+offset ]; E_old[ii+1][jj ][2 ] = EB_WAVE[ii+1][jj ][2+offset ]; // Ey: even-odd-even E_old[ii ][jj+1][0 ] = EB_WAVE[ii ][jj+1][0+offset ]; E_old[ii ][jj+1][2 ] = EB_WAVE[ii ][jj+1][2+offset ]; // Ez: even-even-odd E_old[ii ][jj ][0+1] = EB_WAVE[ii ][jj ][0+offset+1]; E_old[ii ][jj ][2+1] = EB_WAVE[ii ][jj ][2+offset+1]; // store values at z=Nz-1 and z=Nz-2 // Ex: odd-even-even E_old[ii+1][jj ][4 ] = EB_WAVE[ii+1][jj ][N_z-4-offset ]; E_old[ii+1][jj ][6 ] = EB_WAVE[ii+1][jj ][N_z-2-offset ]; // Ey: even-odd-even E_old[ii ][jj+1][4 ] = EB_WAVE[ii ][jj+1][N_z-4-offset ]; E_old[ii ][jj+1][6 ] = EB_WAVE[ii ][jj+1][N_z-2-offset ]; // Ez: even-even-odd E_old[ii ][jj ][4+1] = EB_WAVE[ii ][jj ][N_z-4-offset+1]; E_old[ii ][jj ][6+1] = EB_WAVE[ii ][jj ][N_z-2-offset+1]; } } 
return EXIT_SUCCESS; }//}}} int abs_Mur_1st( size_t N_x, size_t N_y, size_t N_z, double dt, double dx, double EB_WAVE[N_x][N_y][N_z], double E_old_xdir[8][N_y][N_z], double E_old_ydir[N_x][8][N_z], double E_old_zdir[N_x][N_y][8] ) { //{{{ // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd size_t ii, jj, kk, offset; // refers to EB_WAVE only double cnst; cnst = (dt-dx)/(dt+dx); offset = 2; // absorber into x-direction #pragma omp parallel for collapse(2) default(shared) private(jj,kk) for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { // absorber at x=0 grid boundary // Ex: odd-even-even EB_WAVE[offset+0+1][jj ][kk ] = E_old_xdir[2+1][jj ][kk ] + cnst * ( EB_WAVE[offset+2+1][jj ][kk ] -E_old_xdir[0+1 ][jj ][kk ] ); // Ey: even-odd-even EB_WAVE[offset+0 ][jj+1][kk ] = E_old_xdir[2 ][jj+1][kk ] + cnst * ( EB_WAVE[offset+2 ][jj+1][kk ] -E_old_xdir[0 ][jj+1][kk ] ); // Ez: even-even-odd EB_WAVE[offset+0 ][jj ][kk+1] = E_old_xdir[2 ][jj ][kk+1] + cnst * ( EB_WAVE[offset+2 ][jj ][kk+1] -E_old_xdir[0 ][jj ][kk+1] ); // absorber at x=Nx grid boundary // Ex: odd-even-even EB_WAVE[N_x-2-offset+1][jj ][kk ] = E_old_xdir[4+1][jj ][kk ] + cnst * ( EB_WAVE[N_x-4-offset+1][jj ][kk ] -E_old_xdir[6+1 ][jj ][kk ] ); // Ey: even-odd-even EB_WAVE[N_x-2-offset ][jj+1][kk ] = E_old_xdir[4 ][jj+1][kk ] + cnst * ( EB_WAVE[N_x-4-offset ][jj+1][kk ] -E_old_xdir[6 ][jj+1][kk ] ); // Ez: even-even-odd EB_WAVE[N_x-2-offset ][jj ][kk+1] = E_old_xdir[4 ][jj ][kk+1] + cnst * ( EB_WAVE[N_x-4-offset ][jj ][kk+1] -E_old_xdir[6 ][jj ][kk+1] ); } } // absorber into y-direction #pragma omp parallel for collapse(2) default(shared) private(ii,kk) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { // absorber at y=0 grid boundary // Ex: odd-even-even EB_WAVE[ii+1][offset+0 ][kk ] = E_old_ydir[ii+1][2 ][kk ] + cnst * ( EB_WAVE[ii+1][offset+2 ][kk ] -E_old_ydir[ii+1][0 ][kk ] ); // Ey: even-odd-even EB_WAVE[ii ][offset+0+1][kk ] = E_old_ydir[ii ][2+1][kk ] + cnst * ( 
EB_WAVE[ii ][offset+2+1][kk ] -E_old_ydir[ii ][0+1 ][kk ] ); // Ez: even-even-odd EB_WAVE[ii ][offset+0 ][kk+1] = E_old_ydir[ii ][2 ][kk+1] + cnst * ( EB_WAVE[ii ][offset+2 ][kk+1] -E_old_ydir[ii ][0 ][kk+1] ); // absorber at y=Ny grid boundary // Ex: odd-even-even EB_WAVE[ii+1][N_y-2-offset ][kk ] = E_old_ydir[ii+1][4 ][kk ] + cnst * ( EB_WAVE[ii+1][N_y-4-offset ][kk ] -E_old_ydir[ii+1][6 ][kk ] ); // Ey: even-odd-even EB_WAVE[ii ][N_y-2-offset+1][kk ] = E_old_ydir[ii ][4+1][kk ] + cnst * ( EB_WAVE[ii ][N_y-4-offset+1][kk ] -E_old_ydir[ii ][6+1 ][kk ] ); // Ez: even-even-odd EB_WAVE[ii ][N_y-2-offset ][kk+1] = E_old_ydir[ii ][4 ][kk+1] + cnst * ( EB_WAVE[ii ][N_y-4-offset ][kk+1] -E_old_ydir[ii ][6 ][kk+1] ); } } // absorber into z-direction #pragma omp parallel for collapse(2) default(shared) private(ii,jj) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (jj=2 ; jj<N_y-2 ; jj+=2) { // absorber at z=0 grid boundary // Ex: odd-even-even EB_WAVE[ii+1][jj ][offset+0] = E_old_zdir[ii+1][jj ][2 ] + cnst * ( EB_WAVE[ii+1][jj ][offset+2 ] -E_old_zdir[ii+1][jj ][0 ] ); // Ey: even-odd-even EB_WAVE[ii ][jj+1][offset+0] = E_old_zdir[ii ][jj+1][2 ] + cnst * ( EB_WAVE[ii ][jj+1][offset+2 ] -E_old_zdir[ii ][jj+1][0 ] ); // Ez: even-even-odd EB_WAVE[ii ][jj ][offset+0+1] = E_old_zdir[ii ][jj ][2+1] + cnst * ( EB_WAVE[ii ][jj ][offset+2+1] -E_old_zdir[ii ][jj ][0+1] ); // absorber at z=Nz grid boundary // Ex: odd-even-even EB_WAVE[ii+1][jj ][N_z-2-offset ] = E_old_zdir[ii+1][jj ][4 ] + cnst * ( EB_WAVE[ii+1][jj ][N_z-4-offset ] -E_old_zdir[ii+1][jj ][6 ] ); // Ey: even-odd-even EB_WAVE[ii ][jj+1][N_z-2-offset ] = E_old_zdir[ii ][jj+1][4 ] + cnst * ( EB_WAVE[ii ][jj+1][N_z-4-offset ] -E_old_zdir[ii ][jj+1][6 ] ); // Ez: even-even-odd EB_WAVE[ii ][jj ][N_z-2-offset+1] = E_old_zdir[ii ][jj ][4+1] + cnst * ( EB_WAVE[ii ][jj ][N_z-4-offset+1] -E_old_zdir[ii ][jj ][6+1] ); } } return EXIT_SUCCESS; } //}}} int abs_Mur_1st_v2( size_t N_x, size_t N_y, size_t N_z, double dt, double dx, char 
absorber[], double EB_WAVE[N_x][N_y][N_z], double E_old_xdir[8][N_y][N_z], double E_old_ydir[N_x][8][N_z], double E_old_zdir[N_x][N_y][8] ) { //{{{ // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd size_t ii, jj, kk, offset; // refers to EB_WAVE only double cnst; cnst = (dt-dx)/(dt+dx); offset = 2; // the string "absorber" is used to set which absorber is treated // the comparison is done with the strstr() function, which return the address // of the substring if found, NULL otherwise // NOTE: "if (strstr(absorber,"x1))" should be sufficient // "if (strstr(absorber,"x1) != NULL)" should be equivalent // absorber into x-direction if ( strstr(absorber,"x1") ) { //printf("abs_Mur_1st_v2: x1\n"); #pragma omp parallel for collapse(2) default(shared) private(jj,kk) for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { // absorber at x=0 grid boundary // Ex: odd-even-even EB_WAVE[offset+0+1][jj ][kk ] = E_old_xdir[2+1][jj ][kk ] + cnst * ( EB_WAVE[offset+2+1][jj ][kk ] -E_old_xdir[0+1 ][jj ][kk ] ); // Ey: even-odd-even EB_WAVE[offset+0 ][jj+1][kk ] = E_old_xdir[2 ][jj+1][kk ] + cnst * ( EB_WAVE[offset+2 ][jj+1][kk ] -E_old_xdir[0 ][jj+1][kk ] ); // Ez: even-even-odd EB_WAVE[offset+0 ][jj ][kk+1] = E_old_xdir[2 ][jj ][kk+1] + cnst * ( EB_WAVE[offset+2 ][jj ][kk+1] -E_old_xdir[0 ][jj ][kk+1] ); } } } if ( strstr(absorber,"x2") ) { //printf("abs_Mur_1st_v2: x2\n"); #pragma omp parallel for collapse(2) default(shared) private(jj,kk) for (jj=2 ; jj<N_y-2 ; jj+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { // absorber at x=Nx grid boundary // Ex: odd-even-even EB_WAVE[N_x-2-offset+1][jj ][kk ] = E_old_xdir[4+1][jj ][kk ] + cnst * ( EB_WAVE[N_x-4-offset+1][jj ][kk ] -E_old_xdir[6+1 ][jj ][kk ] ); // Ey: even-odd-even EB_WAVE[N_x-2-offset ][jj+1][kk ] = E_old_xdir[4 ][jj+1][kk ] + cnst * ( EB_WAVE[N_x-4-offset ][jj+1][kk ] -E_old_xdir[6 ][jj+1][kk ] ); // Ez: even-even-odd EB_WAVE[N_x-2-offset ][jj ][kk+1] = E_old_xdir[4 ][jj ][kk+1] + cnst * ( EB_WAVE[N_x-4-offset 
][jj ][kk+1] -E_old_xdir[6 ][jj ][kk+1] ); } } } // absorber into y-direction if ( strstr(absorber,"y1") ) { //printf("abs_Mur_1st_v2: y1\n"); #pragma omp parallel for collapse(2) default(shared) private(ii,kk) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { // absorber at y=0 grid boundary // Ex: odd-even-even EB_WAVE[ii+1][offset+0 ][kk ] = E_old_ydir[ii+1][2 ][kk ] + cnst * ( EB_WAVE[ii+1][offset+2 ][kk ] -E_old_ydir[ii+1][0 ][kk ] ); // Ey: even-odd-even EB_WAVE[ii ][offset+0+1][kk ] = E_old_ydir[ii ][2+1][kk ] + cnst * ( EB_WAVE[ii ][offset+2+1][kk ] -E_old_ydir[ii ][0+1 ][kk ] ); // Ez: even-even-odd EB_WAVE[ii ][offset+0 ][kk+1] = E_old_ydir[ii ][2 ][kk+1] + cnst * ( EB_WAVE[ii ][offset+2 ][kk+1] -E_old_ydir[ii ][0 ][kk+1] ); } } } if ( strstr(absorber,"y2") ) { //printf("abs_Mur_1st_v2: y2\n"); #pragma omp parallel for collapse(2) default(shared) private(ii,kk) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (kk=2 ; kk<N_z-2 ; kk+=2) { // absorber at y=Ny grid boundary // Ex: odd-even-even EB_WAVE[ii+1][N_y-2-offset ][kk ] = E_old_ydir[ii+1][4 ][kk ] + cnst * ( EB_WAVE[ii+1][N_y-4-offset ][kk ] -E_old_ydir[ii+1][6 ][kk ] ); // Ey: even-odd-even EB_WAVE[ii ][N_y-2-offset+1][kk ] = E_old_ydir[ii ][4+1][kk ] + cnst * ( EB_WAVE[ii ][N_y-4-offset+1][kk ] -E_old_ydir[ii ][6+1 ][kk ] ); // Ez: even-even-odd EB_WAVE[ii ][N_y-2-offset ][kk+1] = E_old_ydir[ii ][4 ][kk+1] + cnst * ( EB_WAVE[ii ][N_y-4-offset ][kk+1] -E_old_ydir[ii ][6 ][kk+1] ); } } } // absorber into z-direction if ( strstr(absorber,"z1") ) { //printf("abs_Mur_1st_v2: z1\n"); #pragma omp parallel for collapse(2) default(shared) private(ii,jj) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (jj=2 ; jj<N_y-2 ; jj+=2) { // absorber at z=0 grid boundary // Ex: odd-even-even EB_WAVE[ii+1][jj ][offset+0] = E_old_zdir[ii+1][jj ][2 ] + cnst * ( EB_WAVE[ii+1][jj ][offset+2 ] -E_old_zdir[ii+1][jj ][0 ] ); // Ey: even-odd-even EB_WAVE[ii ][jj+1][offset+0] = E_old_zdir[ii ][jj+1][2 ] + cnst * ( EB_WAVE[ii 
][jj+1][offset+2 ] -E_old_zdir[ii ][jj+1][0 ] ); // Ez: even-even-odd EB_WAVE[ii ][jj ][offset+0+1] = E_old_zdir[ii ][jj ][2+1] + cnst * ( EB_WAVE[ii ][jj ][offset+2+1] -E_old_zdir[ii ][jj ][0+1] ); } } } if ( strstr(absorber,"z2") ) { //printf("abs_Mur_1st_v2: z2\n"); #pragma omp parallel for collapse(2) default(shared) private(ii,jj) for (ii=2 ; ii<N_x-2 ; ii+=2) { for (jj=2 ; jj<N_y-2 ; jj+=2) { // absorber at z=Nz grid boundary // Ex: odd-even-even EB_WAVE[ii+1][jj ][N_z-2-offset ] = E_old_zdir[ii+1][jj ][4 ] + cnst * ( EB_WAVE[ii+1][jj ][N_z-4-offset ] -E_old_zdir[ii+1][jj ][6 ] ); // Ey: even-odd-even EB_WAVE[ii ][jj+1][N_z-2-offset ] = E_old_zdir[ii ][jj+1][4 ] + cnst * ( EB_WAVE[ii ][jj+1][N_z-4-offset ] -E_old_zdir[ii ][jj+1][6 ] ); // Ez: even-even-odd EB_WAVE[ii ][jj ][N_z-2-offset+1] = E_old_zdir[ii ][jj ][4+1] + cnst * ( EB_WAVE[ii ][jj ][N_z-4-offset+1] -E_old_zdir[ii ][jj ][6+1] ); } } } return EXIT_SUCCESS; } //}}} int advance_J( size_t dim1, size_t dim2, size_t dim3, double EB_WAVE[dim1][dim2][dim3], double J_B0[dim1][dim2][dim3], double n_e[dim1/2][dim2/2][dim3/2], double dt ) { //{{{ size_t ii, jj, kk; // Jx: odd-even-even // Jy: even-odd-even // Jz: even-even-odd // B0x: even-odd-odd // B0y: odd-even-odd // B0z: odd-odd-even //#pragma omp parallel for collapse(2) default(shared) private(k,j, Jx_tmp1,Jy_tmp1,Jz_tmp1, Jx_tmp2,Jy_tmp2,Jz_tmp2, Jx_tmp3,Jy_tmp3,Jz_tmp3 ) // collapse ??? 
#pragma omp parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=2 ; ii<dim1-2 ; ii+=2) { for (jj=2 ; jj<dim2-2 ; jj+=2) { for (kk=2 ; kk<dim3-2 ; kk+=2) { // Jx: odd-even-even J_B0[ii+1][jj ][kk ] += + dt*( pow(2*M_PI,2) * n_e[(ii/2)][(jj/2)][(kk/2)] * EB_WAVE[ii+1][jj ][kk ] - 2*M_PI * ( J_B0[ii ][jj+1][kk ]*J_B0[ii+1][jj+1][kk ] // +Jy*B0z -J_B0[ii ][jj ][kk+1]*J_B0[ii+1][jj ][kk+1] // -Jz*B0y ) ); // Jy: even-odd-even J_B0[ii ][jj+1][kk ] += + dt*( pow(2*M_PI,2) * n_e[(ii/2)][(jj/2)][(kk/2)] * EB_WAVE[ii ][jj+1][kk ] -2*M_PI * (-J_B0[ii+1][jj ][kk ]*J_B0[ii+1][jj+1][kk ] // -Jx*B0z +J_B0[ii ][jj ][kk+1]*J_B0[ii ][jj+1][kk+1] // +Jz*B0x ) ); // Jz: even-even-odd J_B0[ii ][jj ][kk+1] += + dt*( pow(2*M_PI,2) * n_e[(ii/2)][(jj/2)][(kk/2)] * EB_WAVE[ii ][jj ][kk+1] -2*M_PI * ( J_B0[ii+1][jj ][kk ]*J_B0[ii+1][jj ][kk+1] // +Jx*B0y -J_B0[ii ][jj+1][kk ]*J_B0[ii ][jj+1][kk+1] // -Jy*B0x ) ); } } } return EXIT_SUCCESS; }//}}} int advance_B( size_t dim1, size_t dim2, size_t dim3, double EB_WAVE[dim1][dim2][dim3], double dx, double dt ) { //{{{ size_t ii, jj, kk; #pragma omp parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=2 ; ii<dim1-2 ; ii+=2) { for (jj=2 ; jj<dim2-2 ; jj+=2) { for (kk=2 ; kk<dim3-2 ; kk+=2) { // -dBx/dt = dEz/dy - dEy/dz EB_WAVE[ii ][jj+1][kk+1] += -1.*dt/dx*( +EB_WAVE[ii ][jj+2][kk+1] - EB_WAVE[ii ][jj ][kk+1] -EB_WAVE[ii ][jj+1][kk+2] + EB_WAVE[ii ][jj+1][kk ] ); // -dBy/dt = dEx/dz - dEz/dx EB_WAVE[ii+1][jj ][kk+1] += -1.*dt/dx*( +EB_WAVE[ii+1][jj ][kk+2] - EB_WAVE[ii+1][jj ][kk ] -EB_WAVE[ii+2][jj ][kk+1] + EB_WAVE[ii ][jj ][kk+1] ); // -dBz/dt = dEy/dx - dEx/dy EB_WAVE[ii+1][jj+1][kk ] += -1.*dt/dx*( +EB_WAVE[ii+2][jj+1][kk ] - EB_WAVE[ii ][jj+1][kk ] -EB_WAVE[ii+1][jj+2][kk ] + EB_WAVE[ii+1][jj ][kk ] ); } } } return EXIT_SUCCESS; }//}}} int advance_E( size_t dim1, size_t dim2, size_t dim3, double EB_WAVE[dim1][dim2][dim3], double J_B0[dim1][dim2][dim3], double dx, double dt ) { //{{{ size_t ii, jj, kk; #pragma omp 
parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=2 ; ii<dim1-2 ; ii+=2) { for (jj=2 ; jj<dim2-2 ; jj+=2) { for (kk=2 ; kk<dim3-2 ; kk+=2) { // dEx/dt = (dBz/dy - dBy/dz) EB_WAVE[ii+1][jj ][kk ] += dt/dx*( +EB_WAVE[ii+1][jj+1][kk ] - EB_WAVE[ii+1][jj-1][kk ] -EB_WAVE[ii+1][jj ][kk+1] + EB_WAVE[ii+1][jj ][kk-1] ) - dt*J_B0[ii+1][jj ][kk ]; // dEy/dt = (dBx/dz - dBz/dx) EB_WAVE[ii ][jj+1][kk ] += dt/dx*( +EB_WAVE[ii ][jj+1][kk+1] - EB_WAVE[ii ][jj+1][kk-1] -EB_WAVE[ii+1][jj+1][kk ] + EB_WAVE[ii-1][jj+1][kk ] ) - dt*J_B0[ii ][jj+1][kk ]; // dEz/dt = (dBy/dx - dBx/dy) EB_WAVE[ii ][jj ][kk+1] += dt/dx*( +EB_WAVE[ii+1][jj ][kk+1] - EB_WAVE[ii-1][jj ][kk+1] -EB_WAVE[ii ][jj+1][kk+1] + EB_WAVE[ii ][jj-1][kk+1] ) - dt*J_B0[ii ][jj ][kk+1]; } } } return EXIT_SUCCESS; }//}}} int advance_E_vacuum( size_t dim1, size_t dim2, size_t dim3, double EB_WAVE[dim1][dim2][dim3], double dx, double dt ) { //{{{ size_t ii, jj, kk; #pragma omp parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=2 ; ii<dim1-2 ; ii+=2) { for (jj=2 ; jj<dim2-2 ; jj+=2) { for (kk=2 ; kk<dim3-2 ; kk+=2) { // dEx/dt = (dBz/dy - dBy/dz) EB_WAVE[ii+1][jj ][kk ] += dt/dx*( +EB_WAVE[ii+1][jj+1][kk ] - EB_WAVE[ii+1][jj-1][kk ] -EB_WAVE[ii+1][jj ][kk+1] + EB_WAVE[ii+1][jj ][kk-1] ); // dEy/dt = (dBx/dz - dBz/dx) EB_WAVE[ii ][jj+1][kk ] += dt/dx*( +EB_WAVE[ii ][jj+1][kk+1] - EB_WAVE[ii ][jj+1][kk-1] -EB_WAVE[ii+1][jj+1][kk ] + EB_WAVE[ii-1][jj+1][kk ] ); // dEz/dt = (dBy/dx - dBx/dy) EB_WAVE[ii ][jj ][kk+1] += dt/dx*( +EB_WAVE[ii+1][jj ][kk+1] - EB_WAVE[ii-1][jj ][kk+1] -EB_WAVE[ii ][jj+1][kk+1] + EB_WAVE[ii ][jj-1][kk+1] ); } } } return EXIT_SUCCESS; }//}}} int calc_poynt_1( size_t N_x, size_t N_y, size_t N_z, int pwr_dect, char absorber[], double poynt[3], double EB_WAVE[N_x][N_y][N_z] ) { //{{{ size_t ii, jj, kk; double poynt_x, poynt_y, poynt_z; poynt_x = .0; poynt_y = .0; poynt_z = .0; // P = E x H // Px = Ey*Hz - Ez*Hy // Py = Ez*Hx - Ex*Hz // Pz = Ex*Hy - Ey*Hx // Bx: even-odd-odd 
// By: odd-even-odd // Bz: odd-odd-even // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd if ( strcmp(absorber,"z1") == 0 ) { for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z1-plane poynt_x += ( EB_WAVE[ii ][jj+1][pwr_dect ] *EB_WAVE[ii+1][jj+1][pwr_dect ] -EB_WAVE[ii ][jj ][pwr_dect+1] *EB_WAVE[ii+1][jj ][pwr_dect+1] ); poynt_y += ( EB_WAVE[ii ][jj ][pwr_dect+1] *EB_WAVE[ii ][jj+1][pwr_dect+1] -EB_WAVE[ii+1][jj ][pwr_dect ] *EB_WAVE[ii+1][jj+1][pwr_dect ] ); poynt_z += ( EB_WAVE[ii+1][jj ][pwr_dect ] *EB_WAVE[ii+1][jj ][pwr_dect+1] -EB_WAVE[ii ][jj+1][pwr_dect ] *EB_WAVE[ii ][jj+1][pwr_dect+1] ); } } } else if ( strcmp(absorber,"z2") == 0 ) { for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z2-plane poynt_x += ( EB_WAVE[ii ][jj+1][N_z-pwr_dect ] *EB_WAVE[ii+1][jj+1][N_z-pwr_dect ] -EB_WAVE[ii ][jj ][N_z-pwr_dect-1] *EB_WAVE[ii+1][jj ][N_z-pwr_dect-1] ); poynt_y += ( EB_WAVE[ii ][jj ][N_z-pwr_dect-1] *EB_WAVE[ii ][jj+1][N_z-pwr_dect-1] -EB_WAVE[ii+1][jj ][N_z-pwr_dect ] *EB_WAVE[ii+1][jj+1][N_z-pwr_dect ] ); poynt_z += ( EB_WAVE[ii+1][jj ][N_z-pwr_dect ] *EB_WAVE[ii+1][jj ][N_z-pwr_dect-1] -EB_WAVE[ii ][jj+1][N_z-pwr_dect ] *EB_WAVE[ii ][jj+1][N_z-pwr_dect-1] ); } } } else if ( strcmp(absorber,"x1") == 0 ) { for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x1-plane poynt_x += ( EB_WAVE[pwr_dect ][jj+1][kk ] *EB_WAVE[pwr_dect+1][jj+1][kk ] -EB_WAVE[pwr_dect ][jj ][kk+1] *EB_WAVE[pwr_dect+1][jj ][kk+1] ); poynt_y += ( EB_WAVE[pwr_dect ][jj ][kk+1] *EB_WAVE[pwr_dect ][jj+1][kk+1] -EB_WAVE[pwr_dect+1][jj ][kk ] *EB_WAVE[pwr_dect+1][jj+1][kk ] ); poynt_z += ( EB_WAVE[pwr_dect+1][jj ][kk ] *EB_WAVE[pwr_dect+1][jj ][kk+1] -EB_WAVE[pwr_dect ][jj+1][kk ] *EB_WAVE[pwr_dect ][jj+1][kk+1] ); } } } else if ( strcmp(absorber,"x2") == 0 ) { for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for 
(kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x2-plane poynt_x += ( EB_WAVE[N_x-pwr_dect ][jj+1][kk ] *EB_WAVE[N_x-pwr_dect-1][jj+1][kk ] -EB_WAVE[N_x-pwr_dect ][jj ][kk+1] *EB_WAVE[N_x-pwr_dect-1][jj ][kk+1] ); poynt_y += ( EB_WAVE[N_x-pwr_dect ][jj ][kk+1] *EB_WAVE[N_x-pwr_dect ][jj+1][kk+1] -EB_WAVE[N_x-pwr_dect-1][jj ][kk ] *EB_WAVE[N_x-pwr_dect-1][jj+1][kk ] ); poynt_z += ( EB_WAVE[N_x-pwr_dect-1][jj ][kk ] *EB_WAVE[N_x-pwr_dect-1][jj ][kk+1] -EB_WAVE[N_x-pwr_dect ][jj+1][kk ] *EB_WAVE[N_x-pwr_dect ][jj+1][kk+1] ); } } } else if ( strcmp(absorber,"y1") == 0 ) { for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y1-plane poynt_x += ( EB_WAVE[ii ][pwr_dect+1][kk ] *EB_WAVE[ii+1][pwr_dect+1][kk ] -EB_WAVE[ii ][pwr_dect ][kk+1] *EB_WAVE[ii+1][pwr_dect ][kk+1] ); poynt_y += ( EB_WAVE[ii ][pwr_dect ][kk+1] *EB_WAVE[ii ][pwr_dect+1][kk+1] -EB_WAVE[ii+1][pwr_dect ][kk ] *EB_WAVE[ii+1][pwr_dect+1][kk ] ); poynt_z += ( EB_WAVE[ii+1][pwr_dect ][kk ] *EB_WAVE[ii+1][pwr_dect ][kk+1] -EB_WAVE[ii ][pwr_dect+1][kk ] *EB_WAVE[ii ][pwr_dect+1][kk+1] ); } } } else if ( strcmp(absorber,"y2") == 0 ) { for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y2-plane poynt_x += ( EB_WAVE[ii ][N_y-pwr_dect-1][kk ] *EB_WAVE[ii+1][N_y-pwr_dect-1][kk ] -EB_WAVE[ii ][N_y-pwr_dect ][kk+1] *EB_WAVE[ii+1][N_y-pwr_dect ][kk+1] ); poynt_y += ( EB_WAVE[ii ][N_y-pwr_dect ][kk+1] *EB_WAVE[ii ][N_y-pwr_dect-1][kk+1] -EB_WAVE[ii+1][N_y-pwr_dect ][kk ] *EB_WAVE[ii+1][N_y-pwr_dect-1][kk ] ); poynt_z += ( EB_WAVE[ii+1][N_y-pwr_dect ][kk ] *EB_WAVE[ii+1][N_y-pwr_dect ][kk+1] -EB_WAVE[ii ][N_y-pwr_dect-1][kk ] *EB_WAVE[ii ][N_y-pwr_dect-1][kk+1] ); } } } poynt[0] = poynt_x; poynt[1] = poynt_y; poynt[2] = poynt_z; return EXIT_SUCCESS; }//}}} double calc_poynt_2( size_t N_x, size_t N_y, size_t N_z, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z] ) { //{{{ size_t ii, jj, kk; double 
poynt; poynt = .0; // P = E x H // Px = Ey*Hz - Ez*Hy // Py = Ez*Hx - Ex*Hz // Pz = Ex*Hy - Ey*Hx // Bx: even-odd-odd // By: odd-even-odd // Bz: odd-odd-even // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd if ( strcmp(absorber,"z1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z1-plane // Pz = Ex*Hy - Ey*Hx poynt += ( EB_WAVE[ii+1][jj ][pwr_dect ] *EB_WAVE[ii+1][jj ][pwr_dect+1] -EB_WAVE[ii ][jj+1][pwr_dect ] *EB_WAVE[ii ][jj+1][pwr_dect+1] ); } } } else if ( strcmp(absorber,"z2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z2-plane // Pz = Ex*Hy - Ey*Hx poynt += ( EB_WAVE[ii+1][jj ][N_z-pwr_dect ] *EB_WAVE[ii+1][jj ][N_z-pwr_dect-1] -EB_WAVE[ii ][jj+1][N_z-pwr_dect ] *EB_WAVE[ii ][jj+1][N_z-pwr_dect-1] ); } } } else if ( strcmp(absorber,"x1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt) for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x1-plane // Px = Ey*Hz - Ez*Hy poynt += ( EB_WAVE[pwr_dect ][jj+1][kk ] *EB_WAVE[pwr_dect+1][jj+1][kk ] -EB_WAVE[pwr_dect ][jj ][kk+1] *EB_WAVE[pwr_dect+1][jj ][kk+1] ); } } } else if ( strcmp(absorber,"x2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt) for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x2-plane // Px = Ey*Hz - Ez*Hy poynt += ( EB_WAVE[N_x-pwr_dect ][jj+1][kk ] *EB_WAVE[N_x-pwr_dect-1][jj+1][kk ] -EB_WAVE[N_x-pwr_dect ][jj ][kk+1] *EB_WAVE[N_x-pwr_dect-1][jj ][kk+1] ); } } } else if ( strcmp(absorber,"y1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) 
reduction(+:poynt) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y1-plane // Py = Ez*Hx - Ex*Hz poynt += ( EB_WAVE[ii ][pwr_dect ][kk+1] *EB_WAVE[ii ][pwr_dect+1][kk+1] -EB_WAVE[ii+1][pwr_dect ][kk ] *EB_WAVE[ii+1][pwr_dect+1][kk ] ); } } } else if ( strcmp(absorber,"y2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y2-plane // Py = Ez*Hx - Ex*Hz poynt += ( EB_WAVE[ii ][N_y-pwr_dect ][kk+1] *EB_WAVE[ii ][N_y-pwr_dect-1][kk+1] -EB_WAVE[ii+1][N_y-pwr_dect ][kk ] *EB_WAVE[ii+1][N_y-pwr_dect-1][kk ] ); } } } return poynt; }//}}} double calc_poynt_3( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ) { //{{{ size_t ii, jj, kk; double poynt; poynt = .0; // P = E x H // Px = Ey*Hz - Ez*Hy // Py = Ez*Hx - Ex*Hz // Pz = Ex*Hy - Ey*Hx // Bx: even-odd-odd // By: odd-even-odd // Bz: odd-odd-even // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd if ( strcmp(absorber,"ref_z1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z1-plane // Pz = Ex*Hy - Ey*Hx poynt += ( EB_WAVE_ref[ii+1][jj ][pwr_dect ] *EB_WAVE_ref[ii+1][jj ][pwr_dect+1] -EB_WAVE_ref[ii ][jj+1][pwr_dect ] *EB_WAVE_ref[ii ][jj+1][pwr_dect+1] ); } } } else if ( strcmp(absorber,"z1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z1-plane // Pz = Ex*Hy - Ey*Hx poynt += ( ( EB_WAVE[ii+1][jj ][pwr_dect ] - EB_WAVE_ref[ii+1][jj ][pwr_dect ] ) *( EB_WAVE[ii+1][jj ][pwr_dect+1] - EB_WAVE_ref[ii+1][jj 
][pwr_dect+1] ) -( EB_WAVE[ii ][jj+1][pwr_dect ] - EB_WAVE_ref[ii ][jj+1][pwr_dect ] ) *( EB_WAVE[ii ][jj+1][pwr_dect+1] - EB_WAVE_ref[ii ][jj+1][pwr_dect+1] ) ); } } } else if ( strcmp(absorber,"z2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z2-plane // Pz = Ex*Hy - Ey*Hx poynt += ( EB_WAVE[ii+1][jj ][N_z-pwr_dect ] *EB_WAVE[ii+1][jj ][N_z-pwr_dect-1] -EB_WAVE[ii ][jj+1][N_z-pwr_dect ] *EB_WAVE[ii ][jj+1][N_z-pwr_dect-1] ); } } } else if ( strcmp(absorber,"x1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt) for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x1-plane // Px = Ey*Hz - Ez*Hy poynt += ( EB_WAVE[pwr_dect ][jj+1][kk ] *EB_WAVE[pwr_dect+1][jj+1][kk ] -EB_WAVE[pwr_dect ][jj ][kk+1] *EB_WAVE[pwr_dect+1][jj ][kk+1] ); } } } else if ( strcmp(absorber,"x2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt) for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x2-plane // Px = Ey*Hz - Ez*Hy poynt += ( EB_WAVE[N_x-pwr_dect ][jj+1][kk ] *EB_WAVE[N_x-pwr_dect-1][jj+1][kk ] -EB_WAVE[N_x-pwr_dect ][jj ][kk+1] *EB_WAVE[N_x-pwr_dect-1][jj ][kk+1] ); } } } else if ( strcmp(absorber,"y1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y1-plane // Py = Ez*Hx - Ex*Hz poynt += ( EB_WAVE[ii ][pwr_dect ][kk+1] *EB_WAVE[ii ][pwr_dect+1][kk+1] -EB_WAVE[ii+1][pwr_dect ][kk ] *EB_WAVE[ii+1][pwr_dect+1][kk ] ); } } } else if ( strcmp(absorber,"y2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt) for (ii=pwr_dect ; 
ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y2-plane // Py = Ez*Hx - Ex*Hz poynt += ( EB_WAVE[ii ][N_y-pwr_dect ][kk+1] *EB_WAVE[ii ][N_y-pwr_dect-1][kk+1] -EB_WAVE[ii+1][N_y-pwr_dect ][kk ] *EB_WAVE[ii+1][N_y-pwr_dect-1][kk ] ); } } } return poynt; } //}}} double calc_poynt_4( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ) { //{{{ size_t ii, jj, kk; double poynt; poynt = .0; // P = E x H // Px = Ey*Hz - Ez*Hy // Py = Ez*Hx - Ex*Hz // Pz = Ex*Hy - Ey*Hx // Bx: even-odd-odd // By: odd-even-odd // Bz: odd-odd-even // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd if ( strcmp(absorber,"ref_z1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z1-plane // Pz = Ex*Hy - Ey*Hx poynt += ( EB_WAVE_ref[ii+1][jj ][pwr_dect ] *EB_WAVE_ref[ii+1][jj ][pwr_dect+1] -EB_WAVE_ref[ii ][jj+1][pwr_dect ] *EB_WAVE_ref[ii ][jj+1][pwr_dect+1] ); } } } else if ( strcmp(absorber,"z1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z1-plane // Pz = Ex*Hy - Ey*Hx poynt += ( ( EB_WAVE[ii+1][jj ][pwr_dect ] - EB_WAVE_ref[ii+1][jj ][pwr_dect ] ) *( EB_WAVE[ii+1][jj ][pwr_dect+1] - EB_WAVE_ref[ii+1][jj ][pwr_dect+1] ) -( EB_WAVE[ii ][jj+1][pwr_dect ] - EB_WAVE_ref[ii ][jj+1][pwr_dect ] ) *( EB_WAVE[ii ][jj+1][pwr_dect+1] - EB_WAVE_ref[ii ][jj+1][pwr_dect+1] ) ); //poynt += ( ( -EB_WAVE[ii+1][jj ][pwr_dect ] + EB_WAVE_ref[ii+1][jj ][pwr_dect ] ) // *( -EB_WAVE[ii+1][jj ][pwr_dect+1] + EB_WAVE_ref[ii+1][jj ][pwr_dect+1] ) // -( -EB_WAVE[ii ][jj+1][pwr_dect ] + EB_WAVE_ref[ii ][jj+1][pwr_dect ] ) // *( -EB_WAVE[ii ][jj+1][pwr_dect+1] + 
EB_WAVE_ref[ii ][jj+1][pwr_dect+1] ) ); } } } else if ( strcmp(absorber,"z2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z2-plane // Pz = Ex*Hy - Ey*Hx poynt += ( EB_WAVE[ii+1][jj ][N_z-pwr_dect ] *EB_WAVE[ii+1][jj ][N_z-pwr_dect-1] -EB_WAVE[ii ][jj+1][N_z-pwr_dect ] *EB_WAVE[ii ][jj+1][N_z-pwr_dect-1] ); } } } else if ( strcmp(absorber,"x1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt) for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x1-plane // Px = Ey*Hz - Ez*Hy poynt += ( EB_WAVE[pwr_dect ][jj+1][kk ] *EB_WAVE[pwr_dect+1][jj+1][kk ] -EB_WAVE[pwr_dect ][jj ][kk+1] *EB_WAVE[pwr_dect+1][jj ][kk+1] ); } } } else if ( strcmp(absorber,"x2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt) for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x2-plane // Px = Ey*Hz - Ez*Hy poynt += ( EB_WAVE[N_x-pwr_dect ][jj+1][kk ] *EB_WAVE[N_x-pwr_dect-1][jj+1][kk ] -EB_WAVE[N_x-pwr_dect ][jj ][kk+1] *EB_WAVE[N_x-pwr_dect-1][jj ][kk+1] ); } } } else if ( strcmp(absorber,"y1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y1-plane // Py = Ez*Hx - Ex*Hz poynt += ( EB_WAVE[ii ][pwr_dect ][kk+1] *EB_WAVE[ii ][pwr_dect+1][kk+1] -EB_WAVE[ii+1][pwr_dect ][kk ] *EB_WAVE[ii+1][pwr_dect+1][kk ] ); } } } else if ( strcmp(absorber,"y2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y2-plane // Py = Ez*Hx - Ex*Hz poynt += ( EB_WAVE[ii 
][N_y-pwr_dect  ][kk+1]
                          *EB_WAVE[ii  ][N_y-pwr_dect-1][kk+1]
                          -EB_WAVE[ii+1][N_y-pwr_dect  ][kk  ]
                          *EB_WAVE[ii+1][N_y-pwr_dect-1][kk  ] );
            }
        }
    }
    return fabs(poynt);
} //}}}


// Same diagnostic as calc_poynt_4, but the B-component is spatially averaged
// over the 4 neighbouring in-plane grid points (factor .25) to co-locate it
// with the E-component on the staggered Yee grid.
// NOTE(review): stencils read ii-2/jj-2/kk-2 etc. at the loop start, i.e.
// pwr_dect-2 — assumes pwr_dect >= 2; confirm with callers.
double calc_poynt_5( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref,
                     int pwr_dect, char absorber[],
                     double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ) {
//{{{
    // same as calc_poynt_4, but with spatial averaging of B-component

    size_t
        ii, jj, kk;
    double
        poynt;

    poynt   = .0;

    // P = E x H
    // Px = Ey*Hz - Ez*Hy
    // Py = Ez*Hx - Ex*Hz
    // Pz = Ex*Hy - Ey*Hx
    // Bx: even-odd-odd
    // By: odd-even-odd
    // Bz: odd-odd-even
    // Ex: odd-even-even
    // Ey: even-odd-even
    // Ez: even-even-odd
    if ( strcmp(absorber,"ref_z1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z1-plane
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( EB_WAVE_ref[ii+1][jj  ][pwr_dect  ]
                          *( EB_WAVE_ref[ii-1][jj  ][pwr_dect+1]
                            +EB_WAVE_ref[ii+3][jj  ][pwr_dect+1]
                            +EB_WAVE_ref[ii+1][jj-2][pwr_dect+1]
                            +EB_WAVE_ref[ii+1][jj+2][pwr_dect+1] )*.25
                          -EB_WAVE_ref[ii  ][jj+1][pwr_dect  ]
                          *( EB_WAVE_ref[ii-2][jj+1][pwr_dect+1]
                            +EB_WAVE_ref[ii+2][jj+1][pwr_dect+1]
                            +EB_WAVE_ref[ii  ][jj-1][pwr_dect+1]
                            +EB_WAVE_ref[ii  ][jj+3][pwr_dect+1] )*.25 );
            }
        }
    } else if ( strcmp(absorber,"z1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z1-plane: scattered field (full minus reference), averaged B
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( ( EB_WAVE[ii+1][jj  ][pwr_dect  ] - EB_WAVE_ref[ii+1][jj  ][pwr_dect  ] )
                          *( ( EB_WAVE[ii-1][jj  ][pwr_dect+1]
                              +EB_WAVE[ii+3][jj  ][pwr_dect+1]
                              +EB_WAVE[ii+1][jj-2][pwr_dect+1]
                              +EB_WAVE[ii+1][jj+2][pwr_dect+1] )*.25
                            -( EB_WAVE_ref[ii-1][jj  ][pwr_dect+1]
                              +EB_WAVE_ref[ii+3][jj  ][pwr_dect+1]
                              +EB_WAVE_ref[ii+1][jj-2][pwr_dect+1]
                              +EB_WAVE_ref[ii+1][jj+2][pwr_dect+1] )*.25 )
                          -( EB_WAVE[ii  ][jj+1][pwr_dect  ] -
EB_WAVE_ref[ii  ][jj+1][pwr_dect  ] )
                          *( ( EB_WAVE[ii-2][jj+1][pwr_dect+1]
                              +EB_WAVE[ii+2][jj+1][pwr_dect+1]
                              +EB_WAVE[ii  ][jj-1][pwr_dect+1]
                              +EB_WAVE[ii  ][jj+3][pwr_dect+1] )*.25
                            -( EB_WAVE_ref[ii-2][jj+1][pwr_dect+1]
                              +EB_WAVE_ref[ii+2][jj+1][pwr_dect+1]
                              +EB_WAVE_ref[ii  ][jj-1][pwr_dect+1]
                              +EB_WAVE_ref[ii  ][jj+3][pwr_dect+1] )*.25 ) );
            }
        }
    } else if ( strcmp(absorber,"z2") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z2-plane
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( EB_WAVE[ii+1][jj  ][N_z-pwr_dect  ]
                          *( EB_WAVE[ii-1][jj  ][N_z-pwr_dect-1]
                            +EB_WAVE[ii+3][jj  ][N_z-pwr_dect-1]
                            +EB_WAVE[ii+1][jj-2][N_z-pwr_dect-1]
                            +EB_WAVE[ii+1][jj+2][N_z-pwr_dect-1] )*.25
                          -EB_WAVE[ii  ][jj+1][N_z-pwr_dect  ]
                          *( EB_WAVE[ii-2][jj+1][N_z-pwr_dect-1]
                            +EB_WAVE[ii+2][jj+1][N_z-pwr_dect-1]
                            +EB_WAVE[ii  ][jj-1][N_z-pwr_dect-1]
                            +EB_WAVE[ii  ][jj+3][N_z-pwr_dect-1] )*.25 );
            }
        }
    } else if ( strcmp(absorber,"x1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt)
        for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // x1-plane
                // Px = Ey*Hz - Ez*Hy
                poynt += ( EB_WAVE[pwr_dect  ][jj+1][kk  ]
                          *( EB_WAVE[pwr_dect+1][jj-1][kk  ]
                            +EB_WAVE[pwr_dect+1][jj+3][kk  ]
                            +EB_WAVE[pwr_dect+1][jj+1][kk-2]
                            +EB_WAVE[pwr_dect+1][jj+1][kk+2] )*.25
                          -EB_WAVE[pwr_dect  ][jj  ][kk+1]
                          *( EB_WAVE[pwr_dect+1][jj-2][kk+1]
                            +EB_WAVE[pwr_dect+1][jj+2][kk+1]
                            +EB_WAVE[pwr_dect+1][jj  ][kk-1]
                            +EB_WAVE[pwr_dect+1][jj  ][kk+3] )*.25 );
            }
        }
    } else if ( strcmp(absorber,"x2") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt)
        for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // x2-plane
                // Px = Ey*Hz - Ez*Hy
                poynt += ( EB_WAVE[N_x-pwr_dect  ][jj+1][kk  ]
                          *( EB_WAVE[N_x-pwr_dect-1][jj-1][kk  ]
+EB_WAVE[N_x-pwr_dect-1][jj+3][kk  ]
                            +EB_WAVE[N_x-pwr_dect-1][jj+1][kk-2]
                            +EB_WAVE[N_x-pwr_dect-1][jj+1][kk+2] )*.25
                          -EB_WAVE[N_x-pwr_dect  ][jj  ][kk+1]
                          *( EB_WAVE[N_x-pwr_dect-1][jj-2][kk+1]
                            +EB_WAVE[N_x-pwr_dect-1][jj+2][kk+1]
                            +EB_WAVE[N_x-pwr_dect-1][jj  ][kk-1]
                            +EB_WAVE[N_x-pwr_dect-1][jj  ][kk+3] )*.25 );
            }
        }
    } else if ( strcmp(absorber,"y1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt)
        for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // y1-plane
                // Py = Ez*Hx - Ex*Hz
                poynt += ( EB_WAVE[ii  ][pwr_dect  ][kk+1]
                          *( EB_WAVE[ii-2][pwr_dect+1][kk+1]
                            +EB_WAVE[ii+2][pwr_dect+1][kk+1]
                            +EB_WAVE[ii  ][pwr_dect+1][kk-1]
                            +EB_WAVE[ii  ][pwr_dect+1][kk+3] )*.25
                          -EB_WAVE[ii+1][pwr_dect  ][kk  ]
                          *( EB_WAVE[ii-1][pwr_dect+1][kk  ]
                            +EB_WAVE[ii+3][pwr_dect+1][kk  ]
                            +EB_WAVE[ii+1][pwr_dect+1][kk-2]
                            +EB_WAVE[ii+1][pwr_dect+1][kk+2] )*.25 );
            }
        }
    } else if ( strcmp(absorber,"y2") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt)
        for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // y2-plane
                // Py = Ez*Hx - Ex*Hz
                poynt += ( EB_WAVE[ii  ][N_y-pwr_dect  ][kk+1]
                          *( EB_WAVE[ii-2][N_y-pwr_dect-1][kk+1]
                            +EB_WAVE[ii+2][N_y-pwr_dect-1][kk+1]
                            +EB_WAVE[ii  ][N_y-pwr_dect-1][kk-1]
                            +EB_WAVE[ii  ][N_y-pwr_dect-1][kk+3] )*.25
                          -EB_WAVE[ii+1][N_y-pwr_dect  ][kk  ]
                          *( EB_WAVE[ii-1][N_y-pwr_dect-1][kk  ]
                            +EB_WAVE[ii+3][N_y-pwr_dect-1][kk  ]
                            +EB_WAVE[ii+1][N_y-pwr_dect-1][kk-2]
                            +EB_WAVE[ii+1][N_y-pwr_dect-1][kk+2] )*.25 );
            }
        }
    }
    return fabs(poynt);
} //}}}


// Same diagnostic as calc_poynt_5, but the B-component is averaged over all
// 6 neighbouring grid points (4 in-plane + 2 out-of-plane, factor 1/6).
double calc_poynt_6( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref,
                     int pwr_dect, char absorber[],
                     double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ) {
//{{{
    // same as calc_poynt_5, but full spatial averaging of B-component

    size_t
        ii, jj, kk;
    double
        poynt;

    poynt   = .0;

    // P = E x H
    // Px = Ey*Hz - Ez*Hy
    // Py = Ez*Hx - Ex*Hz
    // Pz = Ex*Hy - Ey*Hx
    // Bx: even-odd-odd
    // By: odd-even-odd
    // Bz: odd-odd-even
    // Ex: odd-even-even
    // Ey: even-odd-even
    // Ez: even-even-odd
    if ( strcmp(absorber,"ref_z1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z1-plane, reference (vacuum) field only
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( EB_WAVE_ref[ii+1][jj  ][pwr_dect  ]
                          *( EB_WAVE_ref[ii-1][jj  ][pwr_dect+1]
                            +EB_WAVE_ref[ii+3][jj  ][pwr_dect+1]
                            +EB_WAVE_ref[ii+1][jj-2][pwr_dect+1]
                            +EB_WAVE_ref[ii+1][jj+2][pwr_dect+1]
                            +EB_WAVE_ref[ii+1][jj  ][pwr_dect-1]
                            +EB_WAVE_ref[ii+1][jj  ][pwr_dect+3] )/6.
                          -EB_WAVE_ref[ii  ][jj+1][pwr_dect  ]
                          *( EB_WAVE_ref[ii-2][jj+1][pwr_dect+1]
                            +EB_WAVE_ref[ii+2][jj+1][pwr_dect+1]
                            +EB_WAVE_ref[ii  ][jj-1][pwr_dect+1]
                            +EB_WAVE_ref[ii  ][jj+3][pwr_dect+1]
                            +EB_WAVE_ref[ii  ][jj+1][pwr_dect-1]
                            +EB_WAVE_ref[ii  ][jj+1][pwr_dect+3] )/6. );
            }
        }
    } else if ( strcmp(absorber,"z1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z1-plane: scattered field (full minus reference), 6-point averaged B
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( ( EB_WAVE[ii+1][jj  ][pwr_dect  ] - EB_WAVE_ref[ii+1][jj  ][pwr_dect  ] )
                          *( ( EB_WAVE[ii-1][jj  ][pwr_dect+1]
                              +EB_WAVE[ii+3][jj  ][pwr_dect+1]
                              +EB_WAVE[ii+1][jj-2][pwr_dect+1]
                              +EB_WAVE[ii+1][jj+2][pwr_dect+1]
                              +EB_WAVE[ii+1][jj  ][pwr_dect-1]
                              +EB_WAVE[ii+1][jj  ][pwr_dect+3] )/6.
                            -( EB_WAVE_ref[ii-1][jj  ][pwr_dect+1]
                              +EB_WAVE_ref[ii+3][jj  ][pwr_dect+1]
                              +EB_WAVE_ref[ii+1][jj-2][pwr_dect+1]
                              +EB_WAVE_ref[ii+1][jj+2][pwr_dect+1]
                              +EB_WAVE_ref[ii+1][jj  ][pwr_dect-1]
                              +EB_WAVE_ref[ii+1][jj  ][pwr_dect+3] )/6. )
                          -( EB_WAVE[ii  ][jj+1][pwr_dect  ] - EB_WAVE_ref[ii  ][jj+1][pwr_dect  ] )
                          *( ( EB_WAVE[ii-2][jj+1][pwr_dect+1]
                              +EB_WAVE[ii+2][jj+1][pwr_dect+1]
                              +EB_WAVE[ii  ][jj-1][pwr_dect+1]
                              +EB_WAVE[ii  ][jj+3][pwr_dect+1]
                              +EB_WAVE[ii  ][jj+1][pwr_dect-1]
                              +EB_WAVE[ii  ][jj+1][pwr_dect+3] )/6.
                            -( EB_WAVE_ref[ii-2][jj+1][pwr_dect+1]
                              +EB_WAVE_ref[ii+2][jj+1][pwr_dect+1]
                              +EB_WAVE_ref[ii  ][jj-1][pwr_dect+1]
                              +EB_WAVE_ref[ii  ][jj+3][pwr_dect+1]
                              +EB_WAVE_ref[ii  ][jj+1][pwr_dect-1]
                              +EB_WAVE_ref[ii  ][jj+1][pwr_dect+3] )/6. ) );
            }
        }
    } else if ( strcmp(absorber,"z2") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z2-plane
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( EB_WAVE[ii+1][jj  ][N_z-pwr_dect  ]
                          *( EB_WAVE[ii-1][jj  ][N_z-pwr_dect-1]
                            +EB_WAVE[ii+3][jj  ][N_z-pwr_dect-1]
                            +EB_WAVE[ii+1][jj-2][N_z-pwr_dect-1]
                            +EB_WAVE[ii+1][jj+2][N_z-pwr_dect-1]
                            +EB_WAVE[ii+1][jj  ][N_z-pwr_dect-3]
                            +EB_WAVE[ii+1][jj  ][N_z-pwr_dect+1] )/6.
                          -EB_WAVE[ii  ][jj+1][N_z-pwr_dect  ]
                          *( EB_WAVE[ii-2][jj+1][N_z-pwr_dect-1]
                            +EB_WAVE[ii+2][jj+1][N_z-pwr_dect-1]
                            +EB_WAVE[ii  ][jj-1][N_z-pwr_dect-1]
                            +EB_WAVE[ii  ][jj+3][N_z-pwr_dect-1]
                            +EB_WAVE[ii  ][jj+1][N_z-pwr_dect-3]
                            +EB_WAVE[ii  ][jj+1][N_z-pwr_dect+1] )/6. );
            }
        }
    } else if ( strcmp(absorber,"x1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt)
        for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // x1-plane
                // Px = Ey*Hz - Ez*Hy
                poynt += ( EB_WAVE[pwr_dect  ][jj+1][kk  ]
                          *( EB_WAVE[pwr_dect+1][jj-1][kk  ]
                            +EB_WAVE[pwr_dect+1][jj+3][kk  ]
                            +EB_WAVE[pwr_dect+1][jj+1][kk-2]
                            +EB_WAVE[pwr_dect+1][jj+1][kk+2]
                            +EB_WAVE[pwr_dect-1][jj+1][kk  ]
                            +EB_WAVE[pwr_dect+3][jj+1][kk  ] )/6.
                          -EB_WAVE[pwr_dect  ][jj  ][kk+1]
                          *( EB_WAVE[pwr_dect+1][jj-2][kk+1]
                            +EB_WAVE[pwr_dect+1][jj+2][kk+1]
                            +EB_WAVE[pwr_dect+1][jj  ][kk-1]
                            +EB_WAVE[pwr_dect+1][jj  ][kk+3]
                            +EB_WAVE[pwr_dect-1][jj  ][kk+1]
                            +EB_WAVE[pwr_dect+3][jj  ][kk+1] )/6.
                         );
            }
        }
    } else if ( strcmp(absorber,"x2") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt)
        for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // x2-plane
                // Px = Ey*Hz - Ez*Hy
                poynt += ( EB_WAVE[N_x-pwr_dect  ][jj+1][kk  ]
                          *( EB_WAVE[N_x-pwr_dect-1][jj-1][kk  ]
                            +EB_WAVE[N_x-pwr_dect-1][jj+3][kk  ]
                            +EB_WAVE[N_x-pwr_dect-1][jj+1][kk-2]
                            +EB_WAVE[N_x-pwr_dect-1][jj+1][kk+2]
                            +EB_WAVE[N_x-pwr_dect-3][jj+1][kk  ]
                            +EB_WAVE[N_x-pwr_dect+1][jj+1][kk  ] )/6.
                          -EB_WAVE[N_x-pwr_dect  ][jj  ][kk+1]
                          *( EB_WAVE[N_x-pwr_dect-1][jj-2][kk+1]
                            +EB_WAVE[N_x-pwr_dect-1][jj+2][kk+1]
                            +EB_WAVE[N_x-pwr_dect-1][jj  ][kk-1]
                            +EB_WAVE[N_x-pwr_dect-1][jj  ][kk+3]
                            +EB_WAVE[N_x-pwr_dect-3][jj  ][kk+1]
                            +EB_WAVE[N_x-pwr_dect+1][jj  ][kk+1] )/6.
                         );
            }
        }
    } else if ( strcmp(absorber,"y1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt)
        for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // y1-plane
                // Py = Ez*Hx - Ex*Hz
                poynt += ( EB_WAVE[ii  ][pwr_dect  ][kk+1]
                          *( EB_WAVE[ii-2][pwr_dect+1][kk+1]
                            +EB_WAVE[ii+2][pwr_dect+1][kk+1]
                            +EB_WAVE[ii  ][pwr_dect+1][kk-1]
                            +EB_WAVE[ii  ][pwr_dect+1][kk+3]
                            +EB_WAVE[ii  ][pwr_dect-1][kk+1]
                            +EB_WAVE[ii  ][pwr_dect+3][kk+1] )/6.
                          -EB_WAVE[ii+1][pwr_dect  ][kk  ]
                          *( EB_WAVE[ii-1][pwr_dect+1][kk  ]
                            +EB_WAVE[ii+3][pwr_dect+1][kk  ]
                            +EB_WAVE[ii+1][pwr_dect+1][kk-2]
                            +EB_WAVE[ii+1][pwr_dect+1][kk+2]
                            +EB_WAVE[ii+1][pwr_dect-1][kk  ]
                            +EB_WAVE[ii+1][pwr_dect+3][kk  ] )/6.
                         );
            }
        }
    } else if ( strcmp(absorber,"y2") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt)
        for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // y2-plane
                // Py = Ez*Hx - Ex*Hz
                poynt += ( EB_WAVE[ii  ][N_y-pwr_dect  ][kk+1]
                          *( EB_WAVE[ii-2][N_y-pwr_dect-1][kk+1]
                            +EB_WAVE[ii+2][N_y-pwr_dect-1][kk+1]
                            +EB_WAVE[ii  ][N_y-pwr_dect-1][kk-1]
                            +EB_WAVE[ii  ][N_y-pwr_dect-1][kk+3]
                            +EB_WAVE[ii  ][N_y-pwr_dect-3][kk+1]
                            +EB_WAVE[ii  ][N_y-pwr_dect+1][kk+1] )/6.
                          -EB_WAVE[ii+1][N_y-pwr_dect  ][kk  ]
                          *( EB_WAVE[ii-1][N_y-pwr_dect-1][kk  ]
                            +EB_WAVE[ii+3][N_y-pwr_dect-1][kk  ]
                            +EB_WAVE[ii+1][N_y-pwr_dect-1][kk-2]
                            +EB_WAVE[ii+1][N_y-pwr_dect-1][kk+2]
                            +EB_WAVE[ii+1][N_y-pwr_dect-3][kk  ]
                            +EB_WAVE[ii+1][N_y-pwr_dect+1][kk  ] )/6.
                         );
            }
        }
    }
    return fabs(poynt);
} //}}}


// Same diagnostic as calc_poynt_4, but the B-component is symmetrized along
// the plane normal: average of the two points straddling the detection plane
// (factor .5).
double calc_poynt_7( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref,
                     int pwr_dect, char absorber[],
                     double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ) {
//{{{
    // same as calc_poynt_4, but with symmetrizing of B-component

    size_t
        ii, jj, kk;
    double
        poynt;

    poynt   = .0;

    // P = E x H
    // Px = Ey*Hz - Ez*Hy
    // Py = Ez*Hx - Ex*Hz
    // Pz = Ex*Hy - Ey*Hx
    // Bx: even-odd-odd
    // By: odd-even-odd
    // Bz: odd-odd-even
    // Ex: odd-even-even
    // Ey: even-odd-even
    // Ez: even-even-odd
    if ( strcmp(absorber,"ref_z1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z1-plane
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( EB_WAVE_ref[ii+1][jj  ][pwr_dect  ]
                          *( EB_WAVE_ref[ii+1][jj  ][pwr_dect+1]
                            +EB_WAVE_ref[ii+1][jj  ][pwr_dect-1] )*.5
                          -EB_WAVE_ref[ii  ][jj+1][pwr_dect  ]
                          *( EB_WAVE_ref[ii  ][jj+1][pwr_dect+1]
                            +EB_WAVE_ref[ii  ][jj+1][pwr_dect-1] )*.5 );
            }
        }
    } else if ( strcmp(absorber,"z1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ;
ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z1-plane: scattered field (full minus reference), symmetrized B
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( ( EB_WAVE[ii+1][jj  ][pwr_dect  ] - EB_WAVE_ref[ii+1][jj  ][pwr_dect  ] )
                          *( ( EB_WAVE[ii+1][jj  ][pwr_dect+1]
                              +EB_WAVE[ii+1][jj  ][pwr_dect-1] )*.5
                            -( EB_WAVE_ref[ii+1][jj  ][pwr_dect+1]
                              +EB_WAVE_ref[ii+1][jj  ][pwr_dect-1] )*.5 )
                          -( EB_WAVE[ii  ][jj+1][pwr_dect  ] - EB_WAVE_ref[ii  ][jj+1][pwr_dect  ] )
                          *( ( EB_WAVE[ii  ][jj+1][pwr_dect+1]
                              +EB_WAVE[ii  ][jj+1][pwr_dect-1] )*.5
                            -( EB_WAVE_ref[ii  ][jj+1][pwr_dect+1]
                              +EB_WAVE_ref[ii  ][jj+1][pwr_dect-1] )*.5 ) );
            }
        }
    } else if ( strcmp(absorber,"z2") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:poynt)
        for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) {
            for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) {
                // z2-plane
                // Pz = Ex*Hy - Ey*Hx
                poynt += ( EB_WAVE[ii+1][jj  ][N_z-pwr_dect  ]
                          *( EB_WAVE[ii+1][jj  ][N_z-pwr_dect-1]
                            +EB_WAVE[ii+1][jj  ][N_z-pwr_dect+1] )*.5
                          -EB_WAVE[ii  ][jj+1][N_z-pwr_dect  ]
                          *( EB_WAVE[ii  ][jj+1][N_z-pwr_dect-1]
                            +EB_WAVE[ii  ][jj+1][N_z-pwr_dect+1] )*.5 );
            }
        }
    } else if ( strcmp(absorber,"x1") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt)
        for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // x1-plane
                // Px = Ey*Hz - Ez*Hy
                poynt += ( EB_WAVE[pwr_dect  ][jj+1][kk  ]
                          *( EB_WAVE[pwr_dect+1][jj+1][kk  ]
                            +EB_WAVE[pwr_dect-1][jj+1][kk  ] )*.5
                          -EB_WAVE[pwr_dect  ][jj  ][kk+1]
                          *( EB_WAVE[pwr_dect+1][jj  ][kk+1]
                            +EB_WAVE[pwr_dect-1][jj  ][kk+1] )*.5 );
            }
        }
    } else if ( strcmp(absorber,"x2") == 0 ) {
#pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:poynt)
        for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) {
            for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) {
                // x2-plane
                // Px = Ey*Hz - Ez*Hy
                poynt += ( EB_WAVE[N_x-pwr_dect  ][jj+1][kk  ]
                          *( EB_WAVE[N_x-pwr_dect-1][jj+1][kk  ]
                            +EB_WAVE[N_x-pwr_dect+1][jj+1][kk  ] )*.5
                          -EB_WAVE[N_x-pwr_dect  ][jj
][kk+1] *( EB_WAVE[N_x-pwr_dect-1][jj ][kk+1] +EB_WAVE[N_x-pwr_dect+1][jj ][kk+1] )*.5 ); } } } else if ( strcmp(absorber,"y1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y1-plane // Py = Ez*Hx - Ex*Hz poynt += ( EB_WAVE[ii ][pwr_dect ][kk+1] *( EB_WAVE[ii ][pwr_dect+1][kk+1] +EB_WAVE[ii ][pwr_dect-1][kk+1] )*.5 -EB_WAVE[ii+1][pwr_dect ][kk ] *( EB_WAVE[ii+1][pwr_dect+1][kk ] +EB_WAVE[ii+1][pwr_dect-1][kk ] )*.5 ); } } } else if ( strcmp(absorber,"y2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:poynt) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y2-plane // Py = Ez*Hx - Ex*Hz poynt += ( EB_WAVE[ii ][N_y-pwr_dect ][kk+1] *( EB_WAVE[ii ][N_y-pwr_dect-1][kk+1] +EB_WAVE[ii ][N_y-pwr_dect+1][kk+1] )*.5 -EB_WAVE[ii+1][N_y-pwr_dect ][kk ] *( EB_WAVE[ii+1][N_y-pwr_dect-1][kk ] +EB_WAVE[ii+1][N_y-pwr_dect+1][kk ] )*.5 ); } } } return fabs(poynt); } //}}} double calc_power_EE_1( size_t N_x, size_t N_y, size_t N_z, size_t N_z_ref, int pwr_dect, char absorber[], double EB_WAVE[N_x][N_y][N_z], double EB_WAVE_ref[N_x][N_y][N_z_ref] ) { //{{{ // sum-up the power (squared E-field) in the absorbers size_t ii, jj, kk; double power; power = .0; // Bx: even-odd-odd // By: odd-even-odd // Bz: odd-odd-even // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd if ( strcmp(absorber,"ref_z1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:power) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z1-plane power += ( pow(EB_WAVE_ref[ii+1][jj ][pwr_dect ], 2) +pow(EB_WAVE_ref[ii ][jj+1][pwr_dect ], 2) +pow(EB_WAVE_ref[ii ][jj ][pwr_dect+1], 2) ); } } } else if ( strcmp(absorber,"z1") == 0 ) { #pragma omp parallel for collapse(2) 
default(shared) private(ii,jj) reduction(+:power) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z1-plane power += ( pow((EB_WAVE[ii+1][jj ][pwr_dect ] - EB_WAVE_ref[ii+1][jj ][pwr_dect ]), 2) +pow((EB_WAVE[ii ][jj+1][pwr_dect ] - EB_WAVE_ref[ii ][jj+1][pwr_dect ]), 2) +pow((EB_WAVE[ii ][jj ][pwr_dect+1] - EB_WAVE_ref[ii ][jj ][pwr_dect+1]), 2) ); } } } else if ( strcmp(absorber,"z2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,jj) reduction(+:power) for (ii=pwr_dect ; ii<(N_x-pwr_dect-2) ; ii+=2) { for (jj=pwr_dect ; jj<(N_y-pwr_dect-2) ; jj+=2) { // z2-plane power += ( pow(EB_WAVE[ii+1][jj ][N_z-pwr_dect ], 2) +pow(EB_WAVE[ii ][jj+1][N_z-pwr_dect ], 2) +pow(EB_WAVE[ii ][jj ][N_z-pwr_dect-1], 2) ); } } } else if ( strcmp(absorber,"x1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:power) for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x1-plane power += ( pow(EB_WAVE[pwr_dect+1][jj ][kk ], 2) +pow(EB_WAVE[pwr_dect ][jj+1][kk ], 2) +pow(EB_WAVE[pwr_dect ][jj ][kk+1], 2) ); } } } else if ( strcmp(absorber,"x2") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(jj,kk) reduction(+:power) for (jj=pwr_dect ; jj<=(N_y-pwr_dect-2) ; jj+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // x2-plane power += ( pow(EB_WAVE[N_x-pwr_dect-1][jj ][kk ], 2) +pow(EB_WAVE[N_x-pwr_dect ][jj+1][kk ], 2) +pow(EB_WAVE[N_x-pwr_dect ][jj ][kk+1], 2) ); } } } else if ( strcmp(absorber,"y1") == 0 ) { #pragma omp parallel for collapse(2) default(shared) private(ii,kk) reduction(+:power) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y1-plane power += ( pow(EB_WAVE[ii+1][pwr_dect ][kk ], 2) +pow(EB_WAVE[ii ][pwr_dect+1][kk ], 2) +pow(EB_WAVE[ii ][pwr_dect ][kk+1], 2) ); } } } else if ( strcmp(absorber,"y2") == 0 ) { #pragma omp 
parallel for collapse(2) default(shared) private(ii,kk) reduction(+:power) for (ii=pwr_dect ; ii<=(N_x-pwr_dect-2) ; ii+=2) { for (kk=pwr_dect ; kk<=(N_z-pwr_dect-2) ; kk+=2) { // y2-plane power += ( pow(EB_WAVE[ii+1][N_y-pwr_dect ][kk ], 2) +pow(EB_WAVE[ii ][N_y-pwr_dect+1][kk ], 2) +pow(EB_WAVE[ii ][N_y-pwr_dect ][kk+1], 2) ); } } } return fabs(power); } //}}} #ifdef DETECTOR_ANTENNA_1D int detAnt1D_storeValues( size_t N_x, size_t N_y, size_t N_z, size_t detAnt_ypos, size_t detAnt_zpos, int tt, double period, double EB_WAVE[N_x][N_y][N_z], double detAnt_fields[N_x/2][5] ) { //{{{ size_t ii; double foo; // Ex: odd-even-even // Ey: even-odd-even // Ez: even-even-odd #pragma omp parallel default(shared) private(ii,foo) #pragma omp for for ( ii=2 ; ii <= N_x-2 ; ii+=2 ) { // calculate abs(E) foo = sqrt( pow(EB_WAVE[ii+1][detAnt_ypos ][detAnt_zpos ],2) +pow(EB_WAVE[ii ][detAnt_ypos+1][detAnt_zpos ],2) +pow(EB_WAVE[ii ][detAnt_ypos ][detAnt_zpos+1],2) ); // sum of E over time // Ex*Ex detAnt_fields[ii/2][0] += pow( EB_WAVE[ii+1][detAnt_ypos ][detAnt_zpos ], 2 ); // Ey*Ey detAnt_fields[ii/2][1] += pow( EB_WAVE[ii ][detAnt_ypos+1][detAnt_zpos ], 2 ); // Ez*Ez detAnt_fields[ii/2][2] += pow( EB_WAVE[ii ][detAnt_ypos ][detAnt_zpos+1], 2 ); // E*E detAnt_fields[ii/2][3] += foo*foo; // corresponding to an rms(E)-like quantity detAnt_fields[ii/2][4] += ( foo * sqrt(1./( (double)(tt)/(double)(period) + 1e-6 )) ); //printf( "tt = %d, ii = %d, sum_t(E*E) = %13.5e\n", // tt, ii, detAnt_fields[ii/2][3] ); } return EXIT_SUCCESS; }//}}} #endif #if defined(HDF5) && defined(DETECTOR_ANTENNA_1D) int detAnt1D_write2hdf5( int N_x, char filename[], char detAnt_groupName[], size_t detAnt_ypos, size_t detAnt_zpos, double detAnt_fields[N_x/2][5] ){ //#{{{ // hdf related variables hid_t file_id, dataset_id, dataspace_id, // object identifiers group_id__detAnt, dataspace_id_i, dataset_id_i; hsize_t dims[1]; // size used for dimensions herr_t status; // function return value // hdf5 related 
variables for applying shuffle and gzip filter hid_t dcpl; hsize_t chunk[1]; unsigned int filter_info; int filter_avail; // required for check if hdf5-file already exists struct stat st; int ii; double data2save[N_x/2]; dims[0] = N_x/2; chunk[0] = N_x/2; // assume as default setting that filters are available filter_avail = 1; //data2save = dvector( 0, n_elem/2 ); set2zero_1D( N_x/2, data2save ); printf( "Will write data for detector antenna position y=%05ld, z=%05ld into file %s\n", detAnt_ypos, detAnt_zpos, filename ); // check if specified hdf5 file already exists // if not, create new one; if yes, open and add dataset to it if ( stat( filename, &st )==0 ) { // open file for read + write access file_id = H5Fopen( filename, // filename H5F_ACC_RDWR, // allow read & write access (_RDONLY for read only) H5P_DEFAULT); // file access property list (default one) } else { // create a new file using default properties. file_id = H5Fcreate( filename, // filename H5F_ACC_TRUNC, // how file should be created (removes existing file) H5P_DEFAULT, // file creating property list H5P_DEFAULT); // file access property list } // create group for different data to be stored group_id__detAnt = H5Gcreate2( file_id, detAnt_groupName, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); // check if filters for shuffle and gzip exists, if yes, apply them // (check is done, because the filters are optional parts of hdf5 library) // check if gzip-filter is available if ( !(H5Zfilter_avail( H5Z_FILTER_DEFLATE )) ) { printf( "WARNING: gzip filter not available (for hdf5)\n" ); filter_avail = 0; } else { status = H5Zget_filter_info( H5Z_FILTER_DEFLATE, &filter_info ); if ( !(filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || !(filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) { printf( "WARNING: gzip filter not available for encoding and decoding (for hdf5)\n" ); filter_avail = 0; } } // check if shuffle-filter is available if ( !(H5Zfilter_avail( H5Z_FILTER_SHUFFLE )) ) { printf( "WARNING: shuffle 
filter not available (for hdf5)\n" ); filter_avail = 0; } else { status = H5Zget_filter_info( H5Z_FILTER_SHUFFLE, &filter_info ); if ( !(filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || !(filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) { printf( "WARNING: shuffle filter not available for encoding and decoding (for hdf5)\n" ); filter_avail = 0; } } // apply shuffle and gzip filters, if available if (filter_avail) { // create dataset creation property list dcpl = H5Pcreate( H5P_DATASET_CREATE ); // add shuffle filter and gzip compression filter // note that the order of filter is significant: first shuffle! // order of filters applied correspond to order in which they are invoked when writin gdata status = H5Pset_shuffle( dcpl ); status = H5Pset_deflate( dcpl, 9 ); status = H5Pset_chunk(dcpl, 1, chunk ); } // store spatial coordinate // prepare array for ( ii=2 ; ii<=N_x-2 ; ii+=2 ) { data2save[ii/2] = (double)(ii) ; } // create data space dataspace_id_i = H5Screate_simple( 1, // rank of array (number of dimensions of dataspace) dims, // array of the size of each dimension NULL); // allow stretching of data space (NULL=no) // printf( "dataspace_id_i=%d\n", dataspace_id_i); // create new dataset and links it to location in file printf( "start to create dataset 'j'\n" ); if (filter_avail) { dataset_id_i = H5Dcreate( group_id__detAnt, // file identifier (or group identifier) "i", // name of dataset (relative to group specified, if speficied) H5T_NATIVE_DOUBLE, // datatype to use when creating dataset dataspace_id_i, // dataspace identifier H5P_DEFAULT, // link creation property list dcpl, // dataset creation property list H5P_DEFAULT); // dataset access property list } else { dataset_id_i = H5Dcreate( group_id__detAnt, // file identifier (or group identifier) "i", // name of dataset (relative to group specified, if speficied) H5T_NATIVE_DOUBLE, // datatype to use when creating dataset dataspace_id_i, // dataspace identifier H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); } 
    // write the dataset for the spatial coordinate
    status = H5Dwrite( dataset_id_i,        // dataset identifier
                       H5T_NATIVE_DOUBLE,   // informs hdf about format of data in memory of computer
                       H5S_ALL,             // identifier of memory dataspace
                       H5S_ALL,             // file space identifier
                       H5P_DEFAULT,         // data transfer property list
                       data2save);          // pointer to data array
    status = H5Dclose(dataset_id_i);
    status = H5Sclose(dataspace_id_i);

    // store position (scalar datasets; data2save[0]/[1] reused as staging)
    dims[0] = 1;
    data2save[0] = (double)(detAnt_ypos);
    data2save[1] = (double)(detAnt_zpos);
    dataspace_id = H5Screate_simple( 1, dims, NULL);
    // detAnt_ypos
    printf( "start to create dataset 'detAnt_ypos'\n" );
    dataset_id = H5Dcreate( group_id__detAnt,
                            "detAnt_ypos",
                            H5T_NATIVE_DOUBLE,
                            dataspace_id,
                            H5P_DEFAULT,
                            H5P_DEFAULT,
                            H5P_DEFAULT);
    status = H5Dwrite( dataset_id,
                       H5T_NATIVE_DOUBLE,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       &data2save[0]);
    status = H5Dclose(dataset_id);
//    status = H5Sclose(dataspace_id);
    // detAnt_zpos (same 1-element dataspace is reused)
//    dataspace_id = H5Screate_simple( 1, dims, NULL);
    printf( "start to create dataset 'detAnt_zpos'\n" );
    dataset_id = H5Dcreate( group_id__detAnt,
                            "detAnt_zpos",
                            H5T_NATIVE_DOUBLE,
                            dataspace_id,
                            H5P_DEFAULT,
                            H5P_DEFAULT,
                            H5P_DEFAULT);
    status = H5Dwrite( dataset_id,
                       H5T_NATIVE_DOUBLE,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       &data2save[1]);
    status = H5Dclose(dataset_id);
    status = H5Sclose(dataspace_id);

    // store sum_ExEx
    dims[0] = N_x/2;
    // since all following arrays have same dimension, dataspace_id needs to be created only once
    // and not closed with H5Sclose(dataspace_id) after each dataset
    dataspace_id = H5Screate_simple( 1, dims, NULL);
    // prepare array to be saved
    set2zero_1D( N_x/2, data2save );
    for ( ii=2 ; ii<=N_x-2 ; ii+=2 )
        data2save[ii/2] = detAnt_fields[ii/2][0];
    printf( "start to create dataset 'sum_ExEx'\n" );
    if (filter_avail)
        dataset_id = H5Dcreate( group_id__detAnt,
                                "sum_ExEx",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                dcpl,
                                H5P_DEFAULT);
    else
        dataset_id = H5Dcreate( group_id__detAnt,
                                "sum_ExEx",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                H5P_DEFAULT,
                                H5P_DEFAULT);
    status = H5Dwrite( dataset_id,
                       H5T_NATIVE_DOUBLE,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       data2save);
    status = H5Dclose(dataset_id);
//    status = H5Sclose(dataspace_id);

    // store sum_EyEy
    set2zero_1D( N_x/2, data2save );
    for ( ii=2 ; ii<=N_x-2 ; ii+=2 )
        data2save[ii/2] = detAnt_fields[ii/2][1];
//    dataspace_id = H5Screate_simple( 1, dims, NULL);
    printf( "start to create dataset 'sum_EyEy'\n" );
    if (filter_avail)
        dataset_id = H5Dcreate( group_id__detAnt,
                                "sum_EyEy",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                dcpl,
                                H5P_DEFAULT);
    else
        dataset_id = H5Dcreate( group_id__detAnt,
                                "sum_EyEy",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                H5P_DEFAULT,
                                H5P_DEFAULT);
    status = H5Dwrite( dataset_id,
                       H5T_NATIVE_DOUBLE,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       data2save);
    status = H5Dclose(dataset_id);
//    status = H5Sclose(dataspace_id);

    // store sum_EzEz
    set2zero_1D( N_x/2, data2save );
    for ( ii=2 ; ii<=N_x-2 ; ii+=2 )
        data2save[ii/2] = detAnt_fields[ii/2][2];
//    dataspace_id = H5Screate_simple( 1, dims, NULL);
    printf( "start to create dataset 'sum_EzEz'\n" );
    if (filter_avail)
        dataset_id = H5Dcreate( group_id__detAnt,
                                "sum_EzEz",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                dcpl,
                                H5P_DEFAULT);
    else
        dataset_id = H5Dcreate( group_id__detAnt,
                                "sum_EzEz",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                H5P_DEFAULT,
                                H5P_DEFAULT);
    status = H5Dwrite( dataset_id,
                       H5T_NATIVE_DOUBLE,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       data2save);
    status = H5Dclose(dataset_id);
//    status = H5Sclose(dataspace_id);

    // store sum_EE
    set2zero_1D( N_x/2, data2save );
    for ( ii=2 ; ii<=N_x-2 ; ii+=2 )
        data2save[ii/2] = detAnt_fields[ii/2][3];
//    dataspace_id = H5Screate_simple( 1, dims, NULL);
    printf( "start to create dataset 'sum_EE'\n" );
    if (filter_avail)
        dataset_id = H5Dcreate( group_id__detAnt,
                                "sum_EE",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                dcpl,
                                H5P_DEFAULT);
    else
        dataset_id = H5Dcreate( group_id__detAnt,
                                "sum_EE",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                H5P_DEFAULT,
                                H5P_DEFAULT);
    status = H5Dwrite( dataset_id,
                       H5T_NATIVE_DOUBLE,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       data2save);
    status = H5Dclose(dataset_id);
//    status = H5Sclose(dataspace_id);

    // store rmsE
    set2zero_1D( N_x/2, data2save );
    for ( ii=2 ; ii<=N_x-2 ; ii+=2 )
        data2save[ii/2] = detAnt_fields[ii/2][4];
//    dataspace_id = H5Screate_simple( 1, dims, NULL);
    printf( "start to create dataset 'rms_E'\n" );
    if (filter_avail)
        dataset_id = H5Dcreate( group_id__detAnt,
                                "rms_E",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                dcpl,
                                H5P_DEFAULT);
    else
        dataset_id = H5Dcreate( group_id__detAnt,
                                "rms_E",
                                H5T_NATIVE_DOUBLE,
                                dataspace_id,
                                H5P_DEFAULT,
                                H5P_DEFAULT,
                                H5P_DEFAULT);
    status = H5Dwrite( dataset_id,
                       H5T_NATIVE_DOUBLE,
                       H5S_ALL,
                       H5S_ALL,
                       H5P_DEFAULT,
                       data2save);
    status = H5Dclose(dataset_id);
    status = H5Sclose(dataspace_id);

    // terminate access and free ressources/identifiers
    if (filter_avail)
        status = H5Pclose( dcpl );
    status = H5Gclose( group_id__detAnt );
    // file
    status = H5Fclose(file_id);

    return EXIT_SUCCESS;
}//#}}}
#endif


#ifdef HDF5
// Write a 3D double array as one dataset into an HDF5 file (created if it
// does not exist yet).  Refuses to overwrite an existing dataset of the same
// name.  Applies shuffle+gzip filters when the HDF5 build offers them.
int writeMyHDF_v4( int dim0, int dim1, int dim2,
                   char filename[], char dataset[],
                   double array_3D[dim0][dim1][dim2] ) {
//#{{{

    // hdf related variables
    hid_t       file_id, dataset_id, dataspace_id;      // object identifiers
    hsize_t     dims[3];                                // size used for dimensions
    herr_t      status;                                 // function return value

    // hdf5 related variables used for applying shuffle and compression filter
    hid_t       dcpl;
    hsize_t     chunk[3];
    unsigned int filter_info;
    int         filter_avail;

    // required for check if hdf5-file already exists
    struct      stat st;

    // assume as default setting, that filters are available
    filter_avail = 1;

    // check if specified hdf5 file already exists
    // if not, create new one; if yes, open and add dataset to it
    if ( stat( filename, &st )==0 ) {
        // open file for read + write access
        file_id = H5Fopen( filename,            // filename
                           H5F_ACC_RDWR,        // allow read & write access (_RDONLY for read only)
                           H5P_DEFAULT);        // file access property list (default one)
        // hdf5 version 1.8.0 introduced H5Lexists to check if link (to group or dataset)
        // exists in hdf5-file
        // NOTE(review): this guard also requires MINOR>=8 and would skip the
        // check for a hypothetical 2.x release — confirm intended version range.
#if H5_VERS_MAJOR>=1 && H5_VERS_MINOR>=8
        if ( H5_VERS_MINOR >= 10 ) {
            printf( "WARNING: hdf5 version 1.10 (or larger is used)\n" );
            printf( "         behavior of H5Lexists was slightly changed in this version\n" );
            printf( "         for details, see https://support.hdfgroup.org/HDF5/doc/RM/RM_H5L.html#Link-Exists\n" );
        }
        if ( H5Lexists( file_id,                // file or group identifier
                        dataset,                // name of link (to group or dataset) to check
                        H5P_DEFAULT )           // link access property list identifier
             > 0 ) {
            // NOTE: for version 1.8.10, this might be slightly different
            printf( "ERROR: dataset named '%s' already exists in file '%s'\n", dataset, filename );
            printf( "       dataset will NOT be saved (no overwrite by default)\n" );
            status = H5Fclose(file_id);
            return EXIT_FAILURE;
        }
#endif
    } else {
        // create a new file using default properties.
        file_id = H5Fcreate( filename,          // filename
                             H5F_ACC_TRUNC,     // how file should be created (removes existing file)
                             H5P_DEFAULT,       // file creating property list
                             H5P_DEFAULT);      // file access property list
    }

    // create simple data space for the dataset
    // (simple = regular N-dimensional array, i.e. data on regular rectangular grid)
    // (complex = e.g.: irregular grids resulting from dynamic refinement of mesh)
    dims[0] = dim0;
    dims[1] = dim1;
    dims[2] = dim2;
    dataspace_id = H5Screate_simple( 3,         // number of dimensions of dataspace
                                     dims,      // size of array in each dimension
                                     NULL);     // allow stretching of data space (NULL=no)

    // check if filters for shuffle and gzip exists, if yes, apply them
    // (check is done, because the filters are optional parts of hdf5 library)
    // check if gzip-filter is available
    if ( !(H5Zfilter_avail( H5Z_FILTER_DEFLATE )) ) {
        printf( "WARNING: gzip filter not available (for hdf5)\n" );
        filter_avail = 0;
    } else {
        status = H5Zget_filter_info( H5Z_FILTER_DEFLATE, &filter_info );
        if ( !(filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
             !(filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) {
            printf( "WARNING: gzip filter not available for encoding and decoding (for hdf5)\n" );
            filter_avail = 0;
        }
    }
    // check if shuffle-filter is available
    if ( !(H5Zfilter_avail( H5Z_FILTER_SHUFFLE )) ) {
        printf( "WARNING: shuffle filter not available (for hdf5)\n" );
        filter_avail = 0;
    } else {
        status = H5Zget_filter_info( H5Z_FILTER_SHUFFLE, &filter_info );
        if ( !(filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
             !(filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) {
            printf( "WARNING: shuffle filter not available for encoding and decoding (for hdf5)\n" );
            filter_avail = 0;
        }
    }

    // apply shuffle and gzip filters, if available
    if (filter_avail) {
        // set chunk size to be same as dimension (might not be optimal, but seems same as h5repack)
        chunk[0] = dim0;
        chunk[1] = dim1;
        chunk[2] = dim2;
        // create dataset creation property list
        dcpl = H5Pcreate( H5P_DATASET_CREATE );
        // add shuffle filter and gzip compression filter
        // note that the order of filter is significant: first shuffle!
// order of filters applied correspond to order in which they are invoked when writin gdata status = H5Pset_shuffle( dcpl ); status = H5Pset_deflate( dcpl, 9 ); status = H5Pset_chunk(dcpl, // dataset creation property list identifier 3, // number of dimensions of each chunk chunk ); // array defining size, in dataset elements, of each chunk // create the dataset dataset_id = H5Dcreate( file_id, // file identifier (or group identifier) dataset, // name of dataset (relative to group specified, if speficied) H5T_NATIVE_DOUBLE, // datatype to use when creating dataset dataspace_id, // dataspace identifier H5P_DEFAULT, // link creation property list (was dataset creating property list <=v1.6) dcpl, // dataset creation property list (added in HDF5v1.8) H5P_DEFAULT); // dataset access property list (added in HDF5v1.8) } else { // create the dataset dataset_id = H5Dcreate( file_id, // file identifier (or group identifier) dataset, // name of dataset (relative to group specified, if speficied) H5T_NATIVE_DOUBLE, // datatype to use when creating dataset dataspace_id, // dataspace identifier H5P_DEFAULT, // link creation property list (was dataset creating property list <=v1.6) H5P_DEFAULT, // dataset creation property list (added in HDF5v1.8) H5P_DEFAULT); // dataset access property list (added in HDF5v1.8) } // write the dataset status = H5Dwrite( dataset_id, // dataset identifier H5T_NATIVE_DOUBLE, // informs hdf about format of data in memory of computer H5S_ALL, // identifier of memory dataspace H5S_ALL, // file space identifier H5P_DEFAULT, // data transfer property list // array_2D[0]); // pointer to data array array_3D); // pointer to data array // terminate access and free ressources/identifiers // dataset creation property list if (filter_avail) status = H5Pclose(dcpl); // dataset status = H5Dclose(dataset_id); // data space status = H5Sclose(dataspace_id); // file status = H5Fclose(file_id); return EXIT_SUCCESS; }//#}}} #endif #ifdef HDF5 int writeConfig2HDF( char 
filename[], int N_x, int N_y, int N_z, int period, int d_absorb ) { //#{{{ long data2write_long[1]; double data2write_double[1]; // hdf related variables hid_t file_id, dataset_id, dataspace_id; // object identifiers hsize_t dims[1]; // size used for dimensions herr_t status; // function return value // note that shuffle and compression filter is not applied here, as only single values are saved // required for check if hdf5-file already exists struct stat st; // check if specified hdf5 file already exists // if not, create new one; if yes, open and add dataset to it if ( stat( filename, &st )==0 ) { // open file for read + write access file_id = H5Fopen( filename, // filename H5F_ACC_RDWR, // allow read & write access (_RDONLY for read only) H5P_DEFAULT); // file access property list (default one) // hdf5 version 1.8.0 introduced H5Lexists to check if link (to group or dataset) exists in hdf5-file #if H5_VERS_MAJOR>=1 && H5_VERS_MINOR>=8 if ( H5_VERS_MINOR >= 10 ) { printf( "WARNING: hdf5 version 1.10 (or larger is used)\n" ); printf( " behavior of H5Lexists was slightly changed in this version\n" ); printf( " for details, see https://support.hdfgroup.org/HDF5/doc/RM/RM_H5L.html#Link-Exists\n" ); } if ( H5Lexists( file_id, // file or group identifier "/config", // name of link (to group or dataset) to check H5P_DEFAULT ) // link access property list identifiert > 0 ) { // NOTE: for version 1.8.10, this might be slightly different printf( "ERROR: dataset named '/config' already exists in file '%s'\n", filename ); printf( " dataset will NOT be saved (no overwrite by default)\n" ); status = H5Fclose(file_id); return EXIT_FAILURE; } #endif } else { // create a new file using default properties. 
file_id = H5Fcreate( filename, // filename H5F_ACC_TRUNC, // how file should be created (removes existing file) H5P_DEFAULT, // file creating property list H5P_DEFAULT); // file access property list } // create new group for config parameters H5Gcreate( file_id, "/config", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); // create simple data space for the dataset // (simple = regular N-dimensional array, i.e. data on regular rectangular grid) // (complex = e.g.: irregular grids resulting from dynamic refinement of mesh) dims[0] = 1; dataspace_id = H5Screate_simple( 1, // number of dimensions of dataspace dims, // size of array in each dimension NULL); // allow stretching of data space (NULL=no) // period // create the dataset dataset_id = H5Dcreate( file_id, // file identifier (or group identifier) "/config/period", // name of dataset (relative to group specified, if speficied) H5T_NATIVE_LONG, // datatype to use when creating dataset dataspace_id, // dataspace identifier H5P_DEFAULT, // link creation property list (was dataset creating property list <=v1.6) H5P_DEFAULT, // dataset creation property list (added in HDF5v1.8) H5P_DEFAULT); // dataset access property list (added in HDF5v1.8) // write the dataset data2write_long[0] = (long)period; status = H5Dwrite( dataset_id, // dataset identifier H5T_NATIVE_LONG, // informs hdf about format of data in memory of computer H5S_ALL, // identifier of memory dataspace H5S_ALL, // file space identifier H5P_DEFAULT, // data transfer property list data2write_long); // pointer to data array // terminate access and free ressources/identifiers for dataset status = H5Dclose(dataset_id); // d_absorb dataset_id = H5Dcreate( file_id, "/config/d_absorb", H5T_NATIVE_LONG, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); data2write_long[0] = (long)d_absorb; status = H5Dwrite( dataset_id, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, H5P_DEFAULT, data2write_long); status = H5Dclose(dataset_id); // N_x dataset_id = H5Dcreate( file_id, "/config/N_x", 
H5T_NATIVE_LONG, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); data2write_long[0] = (long)N_x; status = H5Dwrite( dataset_id, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, H5P_DEFAULT, data2write_long); status = H5Dclose(dataset_id); // N_y dataset_id = H5Dcreate( file_id, "/config/N_y", H5T_NATIVE_LONG, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); data2write_long[0] = (long)N_y; status = H5Dwrite( dataset_id, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, H5P_DEFAULT, data2write_long); status = H5Dclose(dataset_id); // N_z dataset_id = H5Dcreate( file_id, "/config/N_z", H5T_NATIVE_LONG, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); data2write_long[0] = (long)N_z; status = H5Dwrite( dataset_id, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, H5P_DEFAULT, data2write_long); status = H5Dclose(dataset_id); // terminate access and free ressources/identifiers // data space status = H5Sclose(dataspace_id); // file status = H5Fclose(file_id); return EXIT_SUCCESS; }//#}}} #endif #ifdef HDF5 int readMyHDF( int dim0, int dim1, int dim2, char filename[], char dataset[], double array_3D[dim0][dim1][dim2]) { //#{{{ // hdf handles hid_t file_id, dset_id; herr_t status; //hsize_t dims[3] = { dim0, dim1, dim2}; int ii, jj; // open file using default properties file_id = H5Fopen( filename, H5F_ACC_RDONLY, H5P_DEFAULT); // open dataset using default properties dset_id = H5Dopen( file_id, dataset, H5P_DEFAULT); // set the pointers to rows to the correct addresses // must be here, otherwise, data is not read correctly! // might be unnecessary, if matrix-allocation would be different (?) //for ( ii=1; ii<dims[0]; ii++ ) // array_2D[ii] = array_2D[0] + ii * dims[1]; // Read the data using the default properties. 
status = H5Dread( dset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, array_3D ); // close the dataset status = H5Dclose( dset_id); // close the file status = H5Fclose( file_id); return EXIT_SUCCESS; }//#}}} #endif int writeTimetraces2ascii( int dim0, int dim1, int t_end, double period, char filename[], double timetraces[dim0][dim1] ) { //{{{ size_t ii; FILE *file_pntr; // open file in w(rite) mode; might consider using a+ instead file_pntr = fopen( filename, "w" ); if (file_pntr == NULL) { printf( "ERROR: Unable to create file for timetraces.\n" ); return EXIT_FAILURE; } else { // NOTE: if return value of printf < 0, then writing failed. // might be good idea to implicetely check this // e.g. if ( (fprintf( file_pntr, "a b c" )) < 0 ) .... fprintf( file_pntr, "# T poynt_z1 poynt_z2 poynt_x1 poynt_x2 poynt_y1 poynt_y2 P_out\n" ); for ( ii=0 ; ii<(t_end/(int)period) ; ++ii ) fprintf( file_pntr, " %4d %13.6e %13.6e %13.6e %13.6e %13.6e %13.6e %13.6e\n", (int)timetraces[ii][1], timetraces[ii][2], timetraces[ii][3], timetraces[ii][4], timetraces[ii][5], timetraces[ii][6], timetraces[ii][7], (timetraces[ii][2]+timetraces[ii][3] + timetraces[ii][4]+timetraces[ii][5] + timetraces[ii][6]+timetraces[ii][7]) ); if ((fclose(file_pntr)) == EOF) { printf( "ERROR: could not close file for timetraces.\n" ); } } printf( "successfully written timetraces into %s\n", filename ); return EXIT_SUCCESS; }//}}} int set2zero_1D( size_t N_x, double arr_1D[N_x] ){ //{{{ size_t ii; #pragma omp parallel for default(shared) private(ii) for (ii=0 ; ii<N_x ; ++ii) { arr_1D[ii] = .0; } return EXIT_SUCCESS; } //}}} int set2zero_3D( size_t N_x, size_t N_y, size_t N_z, double arr_3D[N_x][N_y][N_z] ){ //{{{ size_t ii, jj, kk; #pragma omp parallel for collapse(3) default(shared) private(ii,jj,kk) for (ii=0 ; ii<N_x ; ++ii) { for (jj=0 ; jj<N_y ; ++jj) { for (kk=0 ; kk<N_z ; ++kk) { arr_3D[ii][jj][kk] = .0; } } } return EXIT_SUCCESS; } //}}}
stream.c
/*-----------------------------------------------------------------------*/ /* Program: Stream */ /* Revision: $Id: stream.c,v 1.11 2012/07/18 14:24:24 rajamony Exp $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in GB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2003: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ #include <float.h> #include <limits.h> #include <stdint.h> #include "utils.h" #ifdef _OPENMP #include <omp.h> #endif #define TUNED 1 #define VERBOSE 1 /* INSTRUCTIONS: * * 1) Stream requires a good bit of memory to run. Adjust the * value of 'N' (below) to give a 'timing calibration' of * at least 20 clock-ticks. This will provide rate estimates * that should be good to about 5% precision. */ static uint64_t VectorSize = 1650163200; # define N 2000000 # define NTIMES 10 # define OFFSET 0 /* * 3) Compile the code with full optimization. Many compilers * generate unreasonably bad code before the optimizer tightens * things up. If the results are unreasonably good, on the * other hand, the optimizer might be too smart for me! * * Try compiling with: * cc -O stream_omp.c -o stream_omp * * This is known to work on Cray, SGI, IBM, and Sun machines. * * * 4) Mail the results to mccalpin@cs.virginia.edu * Be sure to include: * a) computer hardware model number and software revision * b) the compiler flags * c) all of the output from the test case. * Thanks! 
* */ # define HLINE "-------------------------------------------------------------\n" static double *a, *b, *c; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(double), 2 * sizeof(double), 3 * sizeof(double), 3 * sizeof(double) }; #define mysecond GetTime #ifdef TUNED extern void tuned_STREAM_Copy(void); extern void tuned_STREAM_Scale(double scalar); extern void tuned_STREAM_Add(void); extern void tuned_STREAM_Triad(double scalar); #endif static void checkSTREAMresults (FILE *outFile, int doIO, int *failure) { double aj,bj,cj,scalar; double asum,bsum,csum; double epsilon; uint64_t j,k; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; /* a[] is modified during timing check */ aj = 2.0E0 * aj; /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } #if 0 if (doIO) { fprintf (stderr, "aj: %f\n", aj); for (j=0; j<VectorSize; j++) {if (a[j] != aj) fprintf (outFile, "bad at %d = %f\n", j, a[j]); } fprintf (stderr, "bj: %f\n", bj); for (j=0; j<VectorSize; j++) {if (b[j] != bj) fprintf (outFile, "bad at %d = %f\n", j, b[j]); } fprintf (stderr, "cj: %f\n", cj); for (j=0; j<VectorSize; j++) {if (c[j] != cj) fprintf (outFile, "bad at %d = %f\n", j, c[j]); } } #endif // ----- START FIX OF STREAM VALIDATION - OLD METHOD IS ALL WRONG - emailed Piotr Luczek about it and have his concurrence #if 0 aj = aj * (double) VectorSize; bj = bj * (double) VectorSize; cj = cj * (double) VectorSize; #endif int abad = 0, bbad = 0, cbad = 0; asum = 0.0; bsum = 0.0; csum = 0.0; for (j=0; j<VectorSize; j++) { abad += (aj != a[j]); bbad += (bj != b[j]); cbad += (cj != c[j]); } asum = (abad == 0) ? aj : 0; bsum = (bbad == 0) ? bj : 0; csum = (cbad == 0) ? 
cj : 0; // END FIX OF STREAM VALIDATION - OLD VALIDATION CAN NOW TAKE OVER #ifdef VERBOSE if (doIO) { fprintf( outFile, "Results Comparison: \n"); fprintf( outFile, " Expected : %f %f %f \n",aj,bj,cj); fprintf( outFile, " Observed : %f %f %f \n",asum,bsum,csum); } #endif epsilon = 1.e-8; *failure = 1; if (fabs(aj-asum)/asum > epsilon) { if (doIO) { fprintf( outFile, "Failed Validation on array a[]\n"); fprintf( outFile, " Expected : %f \n",aj); fprintf( outFile, " Observed : %f \n",asum); } } else if (fabs(bj-bsum)/bsum > epsilon) { if (doIO) { fprintf( outFile, "Failed Validation on array b[]\n"); fprintf( outFile, " Expected : %f \n",bj); fprintf( outFile, " Observed : %f \n",bsum); } } else if (fabs(cj-csum)/csum > epsilon) { if (doIO) { fprintf( outFile, "Failed Validation on array c[]\n"); fprintf( outFile, " Expected : %f \n",cj); fprintf( outFile, " Observed : %f \n",csum); } } else { *failure = 0; if (doIO) fprintf( outFile, "Solution Validates\n"); } } # define M 20 static int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = Mmin(minDelta, Mmax(Delta,0)); } return(minDelta); } #undef M int HPCC_Stream(HPCC_Params *params, int doIO, double *copyGBs, double *scaleGBs, double *addGBs, double *triadGBs, int *failure) { int quantum; int BytesPerWord; register int j, k; double scalar, t, times[4][NTIMES]; FILE *outFile; double GiBs = 1073741824.0, curGBs; if (doIO) { // outFile = fopen( params->outFname, "w+" ); outFile = stdout; if (! 
outFile) { outFile = stderr; fprintf( outFile, "Cannot open output file.\n" ); return 1; } } // VectorSize = HPCC_LocalVectorSize( params, 3, sizeof(double), 0 ); /* Need 3 vectors */ // HARDCODED VectorSize // params->StreamVectorSize = VectorSize; a = HPCC_XMALLOC( double, VectorSize ); b = HPCC_XMALLOC( double, VectorSize ); c = HPCC_XMALLOC( double, VectorSize ); if (!a || !b || !c) { if (c) HPCC_free(c); if (b) HPCC_free(b); if (a) HPCC_free(a); if (doIO) { fprintf( outFile, "Failed to allocate memory (%lu).\n", VectorSize ); fflush( outFile ); fclose( outFile ); } return 1; } /* --- SETUP --- determine precision and check timing --- */ if (doIO) { fprintf (outFile, "Generated on %s\n", params->nowASCII); fprintf( outFile, HLINE); BytesPerWord = sizeof(double); fprintf( outFile, "This system uses %d bytes per DOUBLE PRECISION word.\n", BytesPerWord); fprintf( outFile, HLINE); fprintf( outFile, "Array size = %lu, Offset = %d\n" , VectorSize, OFFSET); fprintf( outFile, "Total memory required = %.4f GiB.\n", (3.0 * BytesPerWord) * ( (double) VectorSize / GiBs)); fprintf( outFile, "Each test is run %d times, but only\n", NTIMES); fprintf( outFile, "the *best* time for each is used.\n"); fflush ( outFile); } #ifdef _OPENMP if (doIO) fprintf( outFile, HLINE); #pragma omp parallel private(k) { #pragma omp single nowait { k = omp_get_num_threads(); if (doIO) fprintf( outFile, "Number of Threads requested = %i\n",k); params->StreamThreads = k; } } #endif /* Get initial value for system clock. 
*/ #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<VectorSize; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } if (doIO) fprintf( outFile, HLINE); if ( (quantum = checktick()) >= 1) { if (doIO) fprintf( outFile, "Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); } else { if (doIO) fprintf( outFile, "Your clock granularity appears to be " "less than one microsecond.\n"); } t = mysecond(); #ifdef _OPENMP #pragma omp parallel for #endif for (j = 0; j < VectorSize; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); if (doIO) { fprintf( outFile, "Each test below will take on the order" " of %d microseconds.\n", (int) t ); fprintf( outFile, " (= %d clock ticks)\n", (int) (t/quantum) ); fprintf( outFile, "Increase the size of the arrays if this shows that\n"); fprintf( outFile, "you are not getting at least 20 clock ticks per test.\n"); fprintf( outFile, HLINE); fprintf( outFile, "WARNING -- The above is only a rough guideline.\n"); fprintf( outFile, "For best results, please be sure you know the\n"); fprintf( outFile, "precision of your system timer.\n"); fprintf( outFile, HLINE); } /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<VectorSize; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<VectorSize; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<VectorSize; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; 
j<VectorSize; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = Mmin(mintime[j], times[j][k]); maxtime[j] = Mmax(maxtime[j], times[j][k]); } } if (doIO) fprintf( outFile, "Function Rate (GB/s) Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] /= (double)(NTIMES - 1); /* note -- skip first iteration */ /* make sure no division by zero */ curGBs = (mintime[j] > 0.0 ? 1.0 / mintime[j] : -1.0); curGBs *= 1e-9 * bytes[j] * VectorSize; if (doIO) fprintf( outFile, "%s%11.4f %11.4f %11.4f %11.4f\n", label[j], curGBs, avgtime[j], mintime[j], maxtime[j]); switch (j) { case 0: *copyGBs = curGBs; break; case 1: *scaleGBs = curGBs; break; case 2: *addGBs = curGBs; break; case 3: *triadGBs = curGBs; break; } } if (doIO) fprintf( outFile, HLINE); /* --- Check Results --- */ checkSTREAMresults( outFile, doIO, failure ); if (doIO) fprintf( outFile, HLINE); HPCC_free(c); HPCC_free(b); HPCC_free(a); if (doIO) { fflush( outFile ); fclose( outFile ); } return 0; } void tuned_STREAM_Copy() { uint64_t j; #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<VectorSize; j++) c[j] = a[j]; } void tuned_STREAM_Scale(double scalar) { uint64_t j; #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<VectorSize; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { uint64_t j; #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<VectorSize; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(double scalar) { uint64_t j; #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<VectorSize; j++) { a[j] = b[j]+scalar*c[j]; } } int main () { HPCC_Params *params = initialize(); double copyGBs, scaleGBs, addGBs, triadGBs; int failure; HPCC_Stream (params, 1, &copyGBs, &scaleGBs, &addGBs, &triadGBs, &failure); return 0; }
loop-11.c
/* OpenMP worksharing over pointer induction variables: each loop walks a
   region of a local array via a pointer, using a different (deliberately
   varied) increment and comparison form, and the result is then verified
   element by element -- abort() on any mismatch.  The increment/condition
   spellings are the subject of the test and are kept exactly as written. */
#include <omp.h>
#include <stdlib.h>
#include <string.h>

int
test1 (void)
{
  short int arr[64], *q;
  int n;

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for
  for (q = &arr[10]; &arr[54] > q; q++)
    *q = 5;
  for (n = 0; n < 64; n++)
    if (arr[n] != ((n >= 10 && n < 54) ? 5 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for
  for (q = &arr[3]; &arr[63] >= q; q += 2)
    q[-2] = 6;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 1) && n <= 61) ? 6 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for
  for (q = &arr[16]; &arr[51] > q; q = 4 + q)
    q[2] = 7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n < 53) ? 7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for
  for (q = &arr[16]; &arr[40] >= q; q = q + 4ULL)
    q[2] = -7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n <= 42) ? -7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for
  for (q = &arr[53]; &arr[9] < q; --q)
    *q = 5;
  for (n = 0; n < 64; n++)
    if (arr[n] != ((n >= 10 && n < 54) ? 5 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for
  for (q = &arr[63]; &arr[3] <= q; q -= 2)
    q[-2] = 6;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 1) && n <= 61) ? 6 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for
  for (q = &arr[48]; &arr[15] < q; q = -4 + q)
    q[2] = 7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n < 53) ? 7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for
  for (q = &arr[40]; &arr[16] <= q; q = q - 4ULL)
    q[2] = -7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n <= 42) ? -7 : 0))
      abort ();

  return 0;
}

/* Same eight patterns, int array, static schedule with chunk size 3.  */
int
test2 (void)
{
  int arr[64], *q;
  int n;

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (static, 3)
  for (q = &arr[10]; &arr[54] > q; q++)
    *q = 5;
  for (n = 0; n < 64; n++)
    if (arr[n] != ((n >= 10 && n < 54) ? 5 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (static, 3)
  for (q = &arr[3]; &arr[63] >= q; q += 2)
    q[-2] = 6;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 1) && n <= 61) ? 6 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (static, 3)
  for (q = &arr[16]; &arr[51] > q; q = 4 + q)
    q[2] = 7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n < 53) ? 7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (static, 3)
  for (q = &arr[16]; &arr[40] >= q; q = q + 4ULL)
    q[2] = -7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n <= 42) ? -7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (static, 3)
  for (q = &arr[53]; &arr[9] < q; --q)
    *q = 5;
  for (n = 0; n < 64; n++)
    if (arr[n] != ((n >= 10 && n < 54) ? 5 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (static, 3)
  for (q = &arr[63]; &arr[3] <= q; q -= 2)
    q[-2] = 6;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 1) && n <= 61) ? 6 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (static, 3)
  for (q = &arr[48]; &arr[15] < q; q = -4 + q)
    q[2] = 7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n < 53) ? 7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (static, 3)
  for (q = &arr[40]; &arr[16] <= q; q = q - 4ULL)
    q[2] = -7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n <= 42) ? -7 : 0))
      abort ();

  return 0;
}

/* Same eight patterns, dynamic schedule with chunk size 3.  */
int
test3 (void)
{
  int arr[64], *q;
  int n;

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (dynamic, 3)
  for (q = &arr[10]; &arr[54] > q; q++)
    *q = 5;
  for (n = 0; n < 64; n++)
    if (arr[n] != ((n >= 10 && n < 54) ? 5 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (dynamic, 3)
  for (q = &arr[3]; &arr[63] >= q; q += 2)
    q[-2] = 6;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 1) && n <= 61) ? 6 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (dynamic, 3)
  for (q = &arr[16]; &arr[51] > q; q = 4 + q)
    q[2] = 7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n < 53) ? 7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (dynamic, 3)
  for (q = &arr[16]; &arr[40] >= q; q = q + 4ULL)
    q[2] = -7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n <= 42) ? -7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (dynamic, 3)
  for (q = &arr[53]; &arr[9] < q; --q)
    *q = 5;
  for (n = 0; n < 64; n++)
    if (arr[n] != ((n >= 10 && n < 54) ? 5 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (dynamic, 3)
  for (q = &arr[63]; &arr[3] <= q; q -= 2)
    q[-2] = 6;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 1) && n <= 61) ? 6 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (dynamic, 3)
  for (q = &arr[48]; &arr[15] < q; q = -4 + q)
    q[2] = 7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n < 53) ? 7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (dynamic, 3)
  for (q = &arr[40]; &arr[16] <= q; q = q - 4ULL)
    q[2] = -7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n <= 42) ? -7 : 0))
      abort ();

  return 0;
}

/* Same eight patterns, runtime schedule (set by the caller).  */
int
test4 (void)
{
  int arr[64], *q;
  int n;

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (runtime)
  for (q = &arr[10]; &arr[54] > q; q++)
    *q = 5;
  for (n = 0; n < 64; n++)
    if (arr[n] != ((n >= 10 && n < 54) ? 5 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (runtime)
  for (q = &arr[3]; &arr[63] >= q; q += 2)
    q[-2] = 6;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 1) && n <= 61) ? 6 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (runtime)
  for (q = &arr[16]; &arr[51] > q; q = 4 + q)
    q[2] = 7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n < 53) ? 7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (runtime)
  for (q = &arr[16]; &arr[40] >= q; q = q + 4ULL)
    q[2] = -7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n <= 42) ? -7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (runtime)
  for (q = &arr[53]; &arr[9] < q; --q)
    *q = 5;
  for (n = 0; n < 64; n++)
    if (arr[n] != ((n >= 10 && n < 54) ? 5 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (runtime)
  for (q = &arr[63]; &arr[3] <= q; q -= 2)
    q[-2] = 6;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 1) && n <= 61) ? 6 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (runtime)
  for (q = &arr[48]; &arr[15] < q; q = -4 + q)
    q[2] = 7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n < 53) ? 7 : 0))
      abort ();

  memset (arr, '\0', sizeof (arr));
#pragma omp parallel for schedule (runtime)
  for (q = &arr[40]; &arr[16] <= q; q = q - 4ULL)
    q[2] = -7;
  for (n = 0; n < 64; n++)
    if (arr[n] != (((n & 3) == 2 && n >= 18 && n <= 42) ? -7 : 0))
      abort ();

  return 0;
}

int
main (void)
{
  test1 ();
  test2 ();
  test3 ();
  /* exercise schedule(runtime) under several runtime schedules */
  omp_set_schedule (omp_sched_static, 0);
  test4 ();
  omp_set_schedule (omp_sched_static, 3);
  test4 ();
  omp_set_schedule (omp_sched_dynamic, 5);
  test4 ();
  omp_set_schedule (omp_sched_guided, 2);
  test4 ();
  return 0;
}
matrix_multiply_omp_cache_optimized.c
/************************************************************
Author : Ali Snedden
Date   : 8/21/18
License: MIT
Purpose: Multiply two matrices read from numpy-style,
         whitespace-delimited text files, using OpenMP.
Debug  :
Notes  :
    1. To run :
        export OMP_NUM_THREADS=20
        gcc -O3 -fopenmp src/matrix_multiply_omp.c      ### -O3 is critical
Good Weblinks:
    1. Unified Memory :
       https://devblogs.nvidia.com/unified-memory-cuda-beginners/
Future :
    1. Try managing memory directly on Host and Device.
************************************************************/
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>    /* guarded so the file also builds without -fopenmp */
#endif


/**********************************
    ARGS:
        i, j : 2D indices (row, column)
        Ny   : row stride, i.e. the number of columns
    RETURN:
        1D offset of element (i, j) in a row-majored array
    DESCRIPTION: Map 2D indices to 1D index
    DEBUG:
        1. read_numpy_matrix_row_majored() uses this function extensively.
           Directly compared its output with the input file and it was
           IDENTICAL, which could not happen if map_idx() were wrong.
    FUTURE:
***********************************/
int map_idx(int i, int j, int Ny){
    return (Ny * i + j);
}


/********************************************************
    ARGS:
        message : text written to stderr
    DESCRIPTION: Print an error message and terminate the process.
    RETURN: never returns (exit code 1)
*******************************************************/
void exit_with_error(char * message){
    fprintf(stderr, "%s\n", message);
    fflush(stderr);
    exit(1);
}


/**********************************
    ARGS:
        path = path to file to read
        dim  = out-parameter, len = 2; receives [nrows, ncols]
    RETURN:
        newly malloc'd row-majored matrix (caller frees)
    DESCRIPTION:
        Read a whitespace-delimited numeric text file into a flat
        row-majored float array.
        NOTE(review): the column count is inferred from the number of
        ' ' characters per line, i.e. it assumes one trailing space after
        every value (the format produced by write_1D_array()) and a final
        newline on the last row -- TODO confirm for other producers.
    DEBUG:
        1. Printed out the read-in matrix, used 'diff' to compare with the
           original. Was IDENTICAL --> this function WORKS.
***********************************/
float * read_numpy_matrix_row_majored(char* path, int * dim){
    char * line = NULL;
    char * entireFile = NULL;
    char * pch = NULL;          // strtok cursor
    char errStr[500];
    long fileSize = -1;
    int nline = 0;
    int maxchar = 0;            // longest line seen, in characters
    int nchar = 0;              // characters in the current line
    int ncols = -1;             // columns per row; must match on every row
    int ncolsThisRow = 0;
    int i = 0;
    int j = 0;
    long n = 0;                 // index to loop through _all_ file chars
    float * matrix = NULL;
    FILE * f = fopen(path, "r");

    printf("\treading : %s\n", path);
    fflush(stdout);
    if(f == NULL){
        sprintf(errStr, "ERROR!!! %s cannot be opened", path);
        exit_with_error(errStr);
    }

    // Get file size
    fseek(f, 0, SEEK_END);
    fileSize = ftell(f);        // total number of chars in file
    rewind(f);
    if(fileSize <= 0){
        sprintf(errStr, "ERROR!!! %s is empty", path);
        exit_with_error(errStr);
    }

    // Read entire file into memory for the counting pass
    entireFile = (char *)malloc(sizeof(char) * fileSize);
    if(entireFile == NULL){
        exit_with_error("ERROR!!! out of memory reading file");
    }
    if(fread(entireFile, sizeof(char), fileSize, f) != (size_t)fileSize){
        sprintf(errStr, "ERROR!!! short read on %s", path);
        exit_with_error(errStr);
    }
    rewind(f);

    // Find number of lines, columns per line and max chars per line
    for(n = 0; n < fileSize; n++){
        if(entireFile[n] == ' '){
            ncolsThisRow++;
        }
        if(entireFile[n] == '\n'){
            maxchar = nchar > maxchar ? nchar : maxchar;
            if(nline == 0){
                // First row fixes the expected column count
                ncols = ncolsThisRow;
            }else if(ncols != ncolsThisRow){
                // Rows aren't the same size
                sprintf(errStr, "ERROR!!! ncols %i != ncolsThisRow %i\n",
                        ncols, ncolsThisRow);
                exit_with_error(errStr);
            }
            ncolsThisRow = 0;
            nchar = 0;
            nline++;
        }
        nchar++;
    }
    maxchar = maxchar + 2;      // room for '\n' and the NUL in fgets()
    printf("\tdim = [nline, ncols] = [%i, %i], maxchar = %i \n", nline,
           ncols, maxchar);
    fflush(stdout);

    // Done with busy work - now allocate memory, read in array.
    // BUGFIX: previous version allocated nline*maxchar floats (a
    // character count, not a column count).
    matrix = (float *)malloc(sizeof(float) * nline * ncols);
    line = (char *)malloc(sizeof(char) * maxchar);
    if(matrix == NULL || line == NULL){
        exit_with_error("ERROR!!! out of memory allocating matrix");
    }

    // BUGFIX: previous loop kept parsing the stale 'line' buffer after
    // fgets() returned NULL, writing one row past the end of 'matrix'.
    i = 0;
    while(i < nline && fgets(line, maxchar, f) != NULL){
        pch = strtok(line, " \n");
        j = 0;
        while(pch != NULL && j < ncols){    // bound j: never overrun a row
            matrix[map_idx(i, j, ncols)] = (float)atof(pch);
            pch = strtok(NULL, " \n");
            j++;
        }
        i++;
    }

    free(line);
    free(entireFile);
    fclose(f);
    dim[0] = nline;
    dim[1] = ncols;
    return matrix;
}


/*************************************************************
    ARGS:
        B   : matrix stored row-majored in a flat 1D array
        dim : len(dim) = 2, [nrows, ncols]; unchanged on return
    RETURN:
        newly malloc'd column-ordered copy; B itself is FREED here,
        so the caller must keep only the returned pointer.
    DESCRIPTION: Re-order a row-majored matrix as column-majored
        (i.e. store the transpose layout) to make the inner product
        in omp_matrix_multiply() cache friendly.
    DEBUG:
        1. Spot checked beginning, middle and end of matrix; the
           row->column conversion is correct.
**************************************************************/
float * reorder_row_major_as_col_major(float * B, int * dim){
    int i, j;
    float * newM = (float *)malloc(sizeof(float) * dim[0] * dim[1]);
    if(newM == NULL){
        exit_with_error("ERROR!!! out of memory in reorder_row_major_as_col_major");
    }
    for(i = 0; i < dim[0]; i++){
        for(j = 0; j < dim[1]; j++){
            // column j of newM has stride dim[0] (number of rows of B)
            newM[map_idx(j, i, dim[0])] = B[map_idx(i, j, dim[1])];
        }
    }
    printf("Re-ordering matrix B...\n");
    free(B);    // ownership of B is consumed here
    return(newM);
}


/**********************************
    ARGS:
        array1D : 'flattened' 2D array (row-majored)
        Nx, Ny  : dimensions
        f       : open, writable stream
    RETURN: N/A
    DESCRIPTION: Write the matrix to 'f', one row per line, each value
        followed by a single space (the format read_numpy_matrix_row_majored
        expects back).
    DEBUG: 1. spot checked, it works
***********************************/
void write_1D_array(float * array1D, int Nx, int Ny, FILE * f){
    int i = 0;
    int j = 0;
    int idx = 0;
    for(i = 0; i < Nx; i++){
        for(j = 0; j < Ny; j++){
            idx = map_idx(i, j, Ny);
            fprintf(f, "%*.1f ", 5, array1D[idx]);
        }
        fprintf(f, "\n");
    }
}


/**********************************
    ARGS:
        array1D : 'flattened' 2D array (row-majored)
        Nx, Ny  : dimensions
    RETURN: N/A
    DESCRIPTION: Print the matrix to stdout, one row per line.
    DEBUG: 1. spot checked, it works
***********************************/
void print_1D_array(float * array1D, int Nx, int Ny){
    int i = 0;
    int j = 0;
    int idx = 0;
    for(i = 0; i < Nx; i++){
        for(j = 0; j < Ny; j++){
            idx = map_idx(i, j, Ny);
            printf("%*.1f ", 5, array1D[idx]);
        }
        printf("\n");
    }
}


/********************************************************
    ARGS:
        A     : 'flattened' 2D array to fill
        dim   : [nrows, ncols]
        value : fill value
    DESCRIPTION: Set every element of A to 'value'.
    RETURN: N/A
*******************************************************/
void initialize_matrix(float *A, int * dim, float value){
    int i;
    int j;
    for(i = 0; i < dim[0]; i++){
        for(j = 0; j < dim[1]; j++){
            A[map_idx(i, j, dim[1])] = value;
        }
    }
}


/********************************************************
    ARGS:
        A    : 'flattened' 2d matrix, row majored
        B    : 'flattened' 2d matrix, COLUMN majored
               (see reorder_row_major_as_col_major)
        dimA : x & y dims of A
        dimB : x & y dims of B (as originally read, row-majored dims)
        dimAB: out-parameter, receives the size of the product
    DESCRIPTION: Compute A*B in parallel with OpenMP. Because B is stored
        column-majored, the inner dot product walks both operands
        contiguously in memory.
    RETURN: newly malloc'd product matrix (caller frees)
    DEBUG:
        1. matrix_generator.py multiplies two matrices and saves input and
           output. Read data/A.txt, data/B.txt, multiplied with this
           function and compared with data/AB.txt: IDENTICAL.
*******************************************************/
float * omp_matrix_multiply(float * A, float * B, int * dimA, int * dimB,
                            int * dimAB)
{
    int j = 0;      // iterates over columns of the result
    int ai = 0;     // iterates over rows of A
    int bj = 0;     // iterates over the shared (inner) dimension
    float sum = 0;
    char errStr[500];
    float * result = NULL;

    // Error check BEFORE allocating (the old order leaked 'result')
    if(dimA[1] != dimB[0]){
        sprintf(errStr, "ERROR!! dimension mismatch, %i != %i\n", dimA[1],
                dimB[0]);
        exit_with_error(errStr);
    }
    result = (float *)malloc(sizeof(float) * dimA[0] * dimB[1]);
    if(result == NULL){
        exit_with_error("ERROR!! out of memory in omp_matrix_multiply");
    }

    #pragma omp parallel private(sum, bj, ai, j) shared(dimA, dimB, result)
    {
#if defined(_OPENMP)
        printf("%i / %i reporting for duty\n", omp_get_thread_num(),
               omp_get_num_threads());
#endif
        #pragma omp for
        for(ai = 0; ai < dimA[0]; ai++){
            for(j = 0; j < dimB[1]; j++){
                sum = 0;
                for(bj = 0; bj < dimB[0]; bj++){
                    // B is column-majored: element (bj, j) lives at
                    // j*dimB[0] + bj == map_idx(j, bj, dimB[0])
                    sum += A[map_idx(ai, bj, dimA[1])] *
                           B[map_idx(j, bj, dimB[0])];
                }
                result[map_idx(ai, j, dimB[1])] = sum;
            }
        }
    }
    dimAB[0] = dimA[0];
    dimAB[1] = dimB[1];
    return result;
}


/********************************************************
    ARGS:
        At command line :
            matrix_multiply path/to/A.txt path/to/B.txt path/to/output_dir
    DESCRIPTION: Read A and B, re-order B column-majored, multiply with
        OpenMP, write the product to <output_dir>/AB_result.txt.
    RETURN: 0 on success
*******************************************************/
int main(int argc, char * argv[])
{
    char path[256];
    char errStr[500];
    int * dimA = NULL;
    int * dimB = NULL;
    int * dimAB = NULL;
    float *A = NULL;
    float *B = NULL;
    float *AB = NULL;
    FILE * fout = NULL;

    // BUGFIX: argv was used without checking argc
    if(argc < 4){
        exit_with_error("Usage: matrix_multiply A.txt B.txt output_dir");
    }

    dimA = (int *) malloc(2 * sizeof(int));
    dimB = (int *) malloc(2 * sizeof(int));
    dimAB = (int *) malloc(2 * sizeof(int));
    if(dimA == NULL || dimB == NULL || dimAB == NULL){
        exit_with_error("ERROR!! out of memory in main");
    }
    printf("Running matrix_multiply_omp_cache_optimized.c ...\n");

    A = read_numpy_matrix_row_majored(argv[1], dimA);
    B = read_numpy_matrix_row_majored(argv[2], dimB);
    B = reorder_row_major_as_col_major(B, dimB);    // frees old B
    dimAB[0] = dimA[0];
    dimAB[1] = dimB[1];
    fflush(stdout);

    time_t start = time(NULL);
    AB = omp_matrix_multiply(A, B, dimA, dimB, dimAB);
    printf("Run time : %.3f s\n", difftime(time(NULL), start));

    // Output
    snprintf(path, sizeof(path), "%s/AB_result.txt", argv[3]);
    fout = fopen(path, "w+");
    if(fout == NULL){
        sprintf(errStr, "ERROR!! Cannot create, %s\n", path);
        exit_with_error(errStr);
    }
    write_1D_array(AB, dimAB[0], dimAB[1], fout);
    fclose(fout);

    free(dimA);
    free(dimB);
    free(dimAB);
    free(A);
    free(B);
    free(AB);
    return 0;
}
Blank.h
/// \ingroup base
/// \class ttk::Blank
/// \author Your Name Here <Your Email Address Here>
/// \date The Date Here.
///
/// \brief TTK %blank processing package.
///
/// %Blank is a TTK processing package that takes a scalar field on the input
/// and produces a scalar field on the output.
///
/// \sa ttk::Triangulation
/// \sa ttkBlank.cpp %for a usage example.

#pragma once

// base code includes
#include <Triangulation.h>
#include <Wrapper.h>

namespace ttk{

  class Blank : public Debug{

    public:

      Blank();

      ~Blank();

      /// Execute the package.
      /// \pre If this TTK package uses ttk::Triangulation for fast mesh
      /// traversals, the function setupTriangulation() must be called on this
      /// object prior to this function, in a clearly distinct pre-processing
      /// step. An error will be returned otherwise.
      /// \note In such a case, it is recommended to exclude
      /// setupTriangulation() from any time performance measurement.
      /// \param argument Dummy integer argument.
      /// \return Returns 0 upon success, negative values otherwise.
      template <class dataType> int execute(const int &argument) const;

      /// Pass a pointer to an input array representing a scalar field.
      /// The expected format for the array is the following:
      /// <vertex0-component0> <vertex0-component1> ... <vertex0-componentN>
      /// <vertex1-component0> <vertex1-component1> ... <vertex1-componentN>
      /// <vertexM-component0> <vertexM-component1> ... <vertexM-componentN>.
      /// The array is expected to be correctly allocated.
      /// \param data Pointer to the data array (NOT owned by this object).
      /// \return Returns 0 upon success, negative values otherwise.
      /// \sa setVertexNumber() and setDimensionNumber().
      inline int setInputDataPointer(void *data){
        inputData_ = data;
        return 0;
      }

      /// Pass a pointer to an output array representing a scalar field.
      /// The expected format for the array is the following:
      /// <vertex0-component0> <vertex0-component1> ... <vertex0-componentN>
      /// <vertex1-component0> <vertex1-component1> ... <vertex1-componentN>
      /// <vertexM-component0> <vertexM-component1> ... <vertexM-componentN>.
      /// The array is expected to be correctly allocated.
      /// \param data Pointer to the data array (NOT owned by this object).
      /// \return Returns 0 upon success, negative values otherwise.
      /// \sa setVertexNumber() and setDimensionNumber().
      inline int setOutputDataPointer(void *data){
        outputData_ = data;
        return 0;
      }

      // General documentation info:
      //
      /// Setup a (valid) triangulation object for this TTK base object.
      ///
      /// \pre This function should be called prior to any usage of this TTK
      /// object, in a clearly distinct pre-processing step that involves no
      /// traversal or computation at all. An error will be returned otherwise.
      ///
      /// \note It is recommended to exclude this pre-processing function from
      /// any time performance measurement. Therefore, it is recommended to
      /// call this function ONLY in the pre-processing steps of your program.
      /// Note however, that your triangulation object must be valid when
      /// calling this function (i.e. you should have filled it at this point,
      /// see the setInput*() functions of ttk::Triangulation). See ttkBlank
      /// for further examples.
      ///
      /// \param triangulation Pointer to a valid triangulation.
      /// \return Returns 0 upon success, negative values otherwise.
      /// \sa ttk::Triangulation
      //
      //
      // Developer info:
      // ttk::Triangulation is a generic triangulation representation that
      // enables fast mesh traversal, either on explicit triangulations (i.e.
      // tet-meshes) or implicit triangulations (i.e. low-memory footprint
      // implicit triangulations obtained from regular grids).
      //
      // Not all TTK packages need such mesh traversal features. If your
      // TTK package needs any mesh traversal procedure, we recommend to use
      // ttk::Triangulation as described here.
      //
      // Each call to a traversal procedure of ttk::Triangulation
      // must satisfy some pre-condition (see ttk::Triangulation for more
      // details). Such pre-condition functions are typically called from this
      // function.
      inline int setupTriangulation(Triangulation *triangulation){
        triangulation_ = triangulation;

        if(triangulation_){
          // TODO-1
          // Pre-condition functions.
          // Call all the required pre-condition functions here!
          // for example:
          triangulation_->preprocessVertexNeighbors();
          // end of TODO-1
        }

        return 0;
      }

    protected:

      // Raw input/output scalar-field buffers; set by the setters above,
      // never owned or freed by this class.
      void *inputData_, *outputData_;
      Triangulation *triangulation_;
  };
}

// if the package is a pure template class, uncomment the following line
// #include <Blank.cpp>

// template functions
template <class dataType> int ttk::Blank::execute(
  const int &argument) const{

  Timer t;

  // check the consistency of the variables -- to adapt
#ifndef TTK_ENABLE_KAMIKAZE
  if(!triangulation_)
    return -1;
  if(!inputData_)
    return -2;
  if(!outputData_)
    return -3;
#endif

  dataType *outputData = (dataType *) outputData_;
  dataType *inputData = (dataType *) inputData_;
  SimplexId vertexNumber = triangulation_->getNumberOfVertices();

  // init the output -- to adapt
  for(SimplexId i = 0; i < vertexNumber; i++){
    outputData[i] = inputData[i];
  }

  // the following open-mp processing is only relevant for embarrassingly
  // parallel algorithms (such as smoothing) -- to adapt
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
  for(SimplexId i = 0; i < vertexNumber; i++){
    // TODO-2
    // processing here!
    // end of TODO-2
  }

  {
    std::stringstream msg;
    msg << "[Blank] Data-set (" << vertexNumber
      << " points) processed in "
      << t.getElapsedTime() << " s. ("
      << threadNumber_
      << " thread(s))." << std::endl;
    dMsg(std::cout, msg.str(), timeMsg);
  }

  return 0;
}
GB_split_sparse.c
//------------------------------------------------------------------------------
// GB_split_sparse: split a sparse/hypersparse matrix into tiles
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

#define GB_FREE_WORKSPACE                   \
    GB_WERK_POP (C_ek_slicing, int64_t) ;   \
    GB_FREE_WORK (&Wp, Wp_size) ;

#define GB_FREE_ALL         \
    GB_FREE_WORKSPACE ;     \
    GB_Matrix_free (&C) ;

#include "GB_split.h"

GrB_Info GB_split_sparse            // split a sparse matrix
(
    GrB_Matrix *Tiles,              // 2D row-major array of size m-by-n
    const GrB_Index m,
    const GrB_Index n,
    const int64_t *restrict Tile_rows,  // size m+1
    const int64_t *restrict Tile_cols,  // size n+1
    const GrB_Matrix A,             // input matrix
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    int A_sparsity = GB_sparsity (A) ;
    bool A_is_hyper = (A_sparsity == GxB_HYPERSPARSE) ;
    ASSERT (A_is_hyper || A_sparsity == GxB_SPARSE) ;
    GrB_Matrix C = NULL ;
    GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
    ASSERT_MATRIX_OK (A, "A sparse for split", GB0) ;

    // each tile inherits A's settings and type
    int sparsity_control = A->sparsity_control ;
    float hyper_switch = A->hyper_switch ;
    bool csc = A->is_csc ;
    GrB_Type atype = A->type ;
    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;
    size_t asize = atype->size ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    // The outer loop walks tile boundaries along A's vector dimension; the
    // inner loop walks boundaries along the index (vlen) dimension.  Which
    // of Tile_rows/Tile_cols plays which role depends on CSR/CSC format.
    int64_t nouter = csc ? n : m ;
    int64_t ninner = csc ? m : n ;

    const int64_t *Tile_vdim = csc ? Tile_cols : Tile_rows ;
    const int64_t *Tile_vlen = csc ? Tile_rows : Tile_cols ;

    int64_t anvec = A->nvec ;
    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const bool A_iso = A->iso ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // Wp [k] is the current read position inside vector k of A; it starts as
    // a copy of Ap and is advanced past each tile's entries after the tile
    // is extracted (see "advance to the next tile" below).
    size_t Wp_size = 0 ;
    int64_t *restrict Wp = NULL ;
    Wp = GB_MALLOC_WORK (anvec, int64_t, &Wp_size) ;
    if (Wp == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    GB_memcpy (Wp, Ap, anvec * sizeof (int64_t), nthreads_max) ;

    //--------------------------------------------------------------------------
    // split A into tiles
    //--------------------------------------------------------------------------

    int64_t akend = 0 ;

    for (int64_t outer = 0 ; outer < nouter ; outer++)
    {

        //----------------------------------------------------------------------
        // find the starting and ending vector of these tiles
        //----------------------------------------------------------------------

        // The tile appears in vectors avstart:avend-1 of A, and indices
        // aistart:aiend-1.

        const int64_t avstart = Tile_vdim [outer] ;
        const int64_t avend   = Tile_vdim [outer+1] ;
        int64_t akstart = akend ;

        if (A_is_hyper)
        {
            // A is hypersparse: look for vector avend in the A->h hyper list.
            // The vectors to handle for this outer loop are in
            // Ah [akstart:akend-1].
            akend = akstart ;
            int64_t pright = anvec - 1 ;
            bool found ;
            GB_SPLIT_BINARY_SEARCH (avend, Ah, akend, pright, found) ;
            ASSERT (GB_IMPLIES (akstart <= akend-1, Ah [akend-1] < avend)) ;
        }
        else
        {
            // A is sparse; the vectors to handle are akstart:akend-1
            akend = avend ;
        }

        // # of vectors in all tiles in this outer loop
        int64_t cnvec = akend - akstart ;
        int nth = GB_nthreads (cnvec, chunk, nthreads_max) ;

        //----------------------------------------------------------------------
        // create all tiles for vectors akstart:akend-1 in A
        //----------------------------------------------------------------------

        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {

            //------------------------------------------------------------------
            // allocate C, C->p, and C->h for this tile
            //------------------------------------------------------------------

            const int64_t aistart = Tile_vlen [inner] ;
            const int64_t aiend   = Tile_vlen [inner+1] ;
            const int64_t cvdim = avend - avstart ;
            const int64_t cvlen = aiend - aistart ;

            C = NULL ;
            GB_OK (GB_new (&C, // new header
                atype, cvlen, cvdim, GB_Ap_malloc, csc,
                A_sparsity, hyper_switch, cnvec, Context)) ;
            C->sparsity_control = sparsity_control ;
            C->hyper_switch = hyper_switch ;
            C->nvec = cnvec ;
            int64_t *restrict Cp = C->p ;
            int64_t *restrict Ch = C->h ;

            //------------------------------------------------------------------
            // determine the boundaries of this tile
            //------------------------------------------------------------------

            // For each vector k, find the first entry with index >= aiend;
            // Cp [k-akstart] is then the entry count of this tile's vector.
            int64_t k ;
            #pragma omp parallel for num_threads(nth) schedule(static)
            for (k = akstart ; k < akend ; k++)
            {
                int64_t pA = Wp [k] ;
                const int64_t pA_end = Ap [k+1] ;
                const int64_t aknz = pA_end - pA ;
                if (aknz == 0 || Ai [pA] >= aiend)
                {
                    // this vector of C is empty
                }
                else if (aknz > 256)
                {
                    // use binary search to find aiend
                    bool found ;
                    int64_t pright = pA_end - 1 ;
                    GB_SPLIT_BINARY_SEARCH (aiend, Ai, pA, pright, found) ;
                    #ifdef GB_DEBUG
                    // check the results with a linear search
                    int64_t p2 = Wp [k] ;
                    for ( ; p2 < Ap [k+1] ; p2++)
                    {
                        if (Ai [p2] >= aiend) break ;
                    }
                    ASSERT (pA == p2) ;
                    #endif
                }
                else
                {
                    // use a linear-time search to find aiend
                    for ( ; pA < pA_end ; pA++)
                    {
                        if (Ai [pA] >= aiend) break ;
                    }
                    #ifdef GB_DEBUG
                    // check the results with a binary search
                    bool found ;
                    int64_t p2 = Wp [k] ;
                    int64_t p2_end = Ap [k+1] - 1 ;
                    GB_SPLIT_BINARY_SEARCH (aiend, Ai, p2, p2_end, found) ;
                    ASSERT (pA == p2) ;
                    #endif
                }
                Cp [k-akstart] = (pA - Wp [k]) ;    // # of entries in this vector
                if (A_is_hyper)
                {
                    // tile vector ids are relative to the tile's first vector
                    Ch [k-akstart] = Ah [k] - avstart ;
                }
            }

            GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nth, Context) ;
            int64_t cnz = Cp [cnvec] ;

            //------------------------------------------------------------------
            // allocate C->i and C->x for this tile
            //------------------------------------------------------------------

            // set C->iso = A_iso   OK
            GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, A_iso,
                Context)) ;
            int64_t *restrict Ci = C->i ;   // used by the included templates
            C->magic = GB_MAGIC ;           // for GB_nnz_held(C), to slice C

            //------------------------------------------------------------------
            // copy the tile from A into C
            //------------------------------------------------------------------

            int C_ntasks, C_nthreads ;
            GB_SLICE_MATRIX (C, 8, chunk) ;

            bool done = false ;

            if (A_iso)
            {

                //--------------------------------------------------------------
                // split an iso matrix A into an iso tile C
                //--------------------------------------------------------------

                // A is iso and so is C; copy the iso entry
                GBURBLE ("(iso sparse split) ") ;
                memcpy (C->x, A->x, asize) ;
                #define GB_ISO_SPLIT
                #define GB_COPY(pC,pA) ;
                #include "GB_split_sparse_template.c"

            }
            else
            {

                //--------------------------------------------------------------
                // split a non-iso matrix A into an non-iso tile C
                //--------------------------------------------------------------

                #ifndef GBCOMPACT

                    // no typecasting needed
                    switch (asize)
                    {
                        #undef  GB_COPY
                        #define GB_COPY(pC,pA) Cx [pC] = Ax [pA] ;

                        case GB_1BYTE : // uint8, int8, bool, or 1-byte user-defined
                            #define GB_CTYPE uint8_t
                            #include "GB_split_sparse_template.c"
                            break ;

                        case GB_2BYTE : // uint16, int16, or 2-byte user-defined
                            #define GB_CTYPE uint16_t
                            #include "GB_split_sparse_template.c"
                            break ;

                        case GB_4BYTE : // uint32, int32, float, or 4-byte user
                            #define GB_CTYPE uint32_t
                            #include "GB_split_sparse_template.c"
                            break ;

                        case GB_8BYTE : // uint64, int64, double, float complex,
                                        // or 8-byte user defined
                            #define GB_CTYPE uint64_t
                            #include "GB_split_sparse_template.c"
                            break ;

                        case GB_16BYTE : // double complex or 16-byte user-defined
                            #define GB_CTYPE GB_blob16
                            // #define GB_CTYPE uint64_t
                            // #undef  GB_COPY
                            // #define GB_COPY(pC,pA)                      \
                            //     Cx [2*pC  ] = Ax [2*pA  ] ;             \
                            //     Cx [2*pC+1] = Ax [2*pA+1] ;
                            #include "GB_split_sparse_template.c"
                            break ;

                        default:;
                    }
                #endif
            }

            if (!done)
            {
                // user-defined types
                #define GB_CTYPE GB_void
                #undef  GB_COPY
                #define GB_COPY(pC,pA)                                  \
                    memcpy (Cx + (pC)*asize, Ax +(pA)*asize, asize) ;
                #include "GB_split_sparse_template.c"
            }

            //------------------------------------------------------------------
            // free workspace
            //------------------------------------------------------------------

            GB_WERK_POP (C_ek_slicing, int64_t) ;

            //------------------------------------------------------------------
            // advance to the next tile
            //------------------------------------------------------------------

            // move Wp [k] past the entries just copied, so the next inner
            // tile starts where this one ended (skipped on the last tile)
            if (inner < ninner - 1)
            {
                int64_t k ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (k = akstart ; k < akend ; k++)
                {
                    int64_t ck = k - akstart ;
                    int64_t cknz = Cp [ck+1] - Cp [ck] ;
                    Wp [k] += cknz ;
                }
            }

            //------------------------------------------------------------------
            // conform the tile and save it in the Tiles array
            //------------------------------------------------------------------

            ASSERT_MATRIX_OK (C, "C for GB_split", GB0) ;
            GB_OK (GB_hypermatrix_prune (C, Context)) ;
            GB_OK (GB_conform (C, Context)) ;
            if (csc)
            {
                GB_TILE (Tiles, inner, outer) = C ;
            }
            else
            {
                GB_TILE (Tiles, outer, inner) = C ;
            }
            ASSERT_MATRIX_OK (C, "final tile C for GB_split", GB0) ;
            C = NULL ;
        }
    }

    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
}
matching.h
#ifndef REGISTRATION_MATCHING_H #define REGISTRATION_MATCHING_H #include <unordered_set> #include <utility> #include <opencv2/features2d.hpp> #include <pcl/common/norms.h> #include <pcl/point_cloud.h> #include <pcl/search/kdtree.h> #include <pcl/common/transforms.h> #include "common.h" #define MATCHING_RATIO_THRESHOLD 0.95f #define MATCHING_CLUSTER_THRESHOLD 0.8f #define MATCHING_CLUSTER_RADIUS_COEF 7.f template<typename FeatureT> class FeatureMatcher { public: using Ptr = std::shared_ptr<FeatureMatcher<FeatureT>>; using ConstPtr = std::shared_ptr<const FeatureMatcher<FeatureT>>; using KdTreeConstPtr = typename pcl::search::KdTree<PointN>::ConstPtr; virtual pcl::Correspondences match(const typename pcl::PointCloud<FeatureT>::ConstPtr &src, const typename pcl::PointCloud<FeatureT>::ConstPtr &tgt, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt, const typename pcl::PointRepresentation<FeatureT>::Ptr &point_representation, int threads) = 0; inline float getAverageDistance() const { return average_distance_; } virtual std::string getClassName() = 0; protected: void printDebugInfo(const std::vector<MultivaluedCorrespondence> &mv_correspondences) { float dists_sum = 0.f; int n_dists = 0; for (int i = 0; i < mv_correspondences.size(); i++) { if (mv_correspondences[i].query_idx >= 0) { dists_sum += mv_correspondences[i].distances[0]; n_dists++; } } if (n_dists == 0) { PCL_ERROR("[%s::match] no distances were calculated.\n", getClassName().c_str()); } else { average_distance_ = dists_sum / (float) n_dists; PCL_DEBUG("[%s::match] average distance to nearest neighbour: %0.7f.\n", getClassName().c_str(), average_distance_); } } float average_distance_ = std::numeric_limits<float>::max(); AlignmentParameters parameters_; }; template<typename FeatureT> class LeftToRightFeatureMatcher : public FeatureMatcher<FeatureT> { public: using KdTreeConstPtr = typename pcl::search::KdTree<PointN>::ConstPtr; LeftToRightFeatureMatcher() = delete; 
LeftToRightFeatureMatcher(AlignmentParameters parameters) : parameters_(std::move(parameters)) {}; pcl::Correspondences match(const typename pcl::PointCloud<FeatureT>::ConstPtr &src, const typename pcl::PointCloud<FeatureT>::ConstPtr &tgt, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt, const typename pcl::PointRepresentation<FeatureT>::Ptr &point_representation, int threads) override { int nr_dims = point_representation->getNumberOfDimensions(); std::vector<MultivaluedCorrespondence> mv_correspondences_ij; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_src->getInputCloud(), pcd_tree_tgt, src, tgt, mv_correspondences_ij, point_representation, *parameters_.guess, parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, this->parameters_.randomness, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, this->parameters_.randomness, threads); } this->printDebugInfo(mv_correspondences_ij); std::vector<MultivaluedCorrespondence> mv_correspondences_ji; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_tgt->getInputCloud(), pcd_tree_src, tgt, src, mv_correspondences_ji, point_representation, parameters_.guess->inverse(), parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(tgt, src, mv_correspondences_ji, point_representation, this->parameters_.randomness, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(tgt, src, mv_correspondences_ji, point_representation, this->parameters_.randomness, threads); } pcl::Correspondences correspondences_mutual; for (int i = 0; i < src->size(); ++i) { for (const int &j: mv_correspondences_ij[i].match_indices) { auto &corr_j = mv_correspondences_ji[j]; for (int k = 0; k < 
corr_j.match_indices.size(); ++k) { if (corr_j.match_indices[k] == i) { correspondences_mutual.push_back({i, j, corr_j.distances[k]}); break; } } } } PCL_DEBUG("[%s::match] %i correspondences remain after mutual filtering.\n", getClassName().c_str(), correspondences_mutual.size()); return correspondences_mutual; } inline std::string getClassName() override { return "LeftToRightFeatureMatcher"; } protected: AlignmentParameters parameters_; }; template<typename FeatureT> class RatioFeatureMatcher : public FeatureMatcher<FeatureT> { public: using KdTreeConstPtr = typename pcl::search::KdTree<PointN>::ConstPtr; RatioFeatureMatcher() = delete; RatioFeatureMatcher(AlignmentParameters parameters) : parameters_(std::move(parameters)) {}; pcl::Correspondences match(const typename pcl::PointCloud<FeatureT>::ConstPtr &src, const typename pcl::PointCloud<FeatureT>::ConstPtr &tgt, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt, const typename pcl::PointRepresentation<FeatureT>::Ptr &point_representation, int threads) override { if (this->parameters_.randomness != 1) { PCL_WARN("[%s::match] k_corrs different from 1 cannot be used with ratio filtering, using k_corrs = 1.\n", getClassName().c_str()); } int nr_dims = point_representation->getNumberOfDimensions(); std::vector<MultivaluedCorrespondence> mv_correspondences_ij; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_src->getInputCloud(), pcd_tree_tgt, src, tgt, mv_correspondences_ij, point_representation, *parameters_.guess, parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, 2, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, 2, threads); } this->printDebugInfo(mv_correspondences_ij); float dist1, dist2, ratio; pcl::Correspondences correspondences_ratio; for (auto 
&mv_corr: mv_correspondences_ij) { if (mv_corr.match_indices.size() != 2) { continue; } dist1 = std::min(mv_corr.distances[0], mv_corr.distances[1]); dist2 = std::max(mv_corr.distances[0], mv_corr.distances[1]); ratio = (dist2 == 0.f) ? 1.f : (dist1 / dist2); if (ratio < MATCHING_RATIO_THRESHOLD) { int i = (dist1 < dist2) ? 0 : 1; correspondences_ratio.push_back({mv_corr.query_idx, mv_corr.match_indices[i], ratio}); } } PCL_DEBUG("[%s::match] %i correspondences remain after ratio filtering.\n", getClassName().c_str(), correspondences_ratio.size()); return correspondences_ratio; } inline std::string getClassName() override { return "RatioFeatureMatcher"; } protected: AlignmentParameters parameters_; }; template<typename FeatureT> class ClusterFeatureMatcher : public FeatureMatcher<FeatureT> { public: using KdTreeConstPtr = typename pcl::search::KdTree<PointN>::ConstPtr; ClusterFeatureMatcher() = delete; ClusterFeatureMatcher(AlignmentParameters parameters) : parameters_(std::move(parameters)) {}; pcl::Correspondences match(const typename pcl::PointCloud<FeatureT>::ConstPtr &src, const typename pcl::PointCloud<FeatureT>::ConstPtr &tgt, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt, const typename pcl::PointRepresentation<FeatureT>::Ptr &point_representation, int threads) override { int nr_dims = point_representation->getNumberOfDimensions(); std::vector<MultivaluedCorrespondence> mv_correspondences_ij; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_src->getInputCloud(), pcd_tree_tgt, src, tgt, mv_correspondences_ij, point_representation, *parameters_.guess, parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, this->parameters_.randomness, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, 
this->parameters_.randomness, threads); } this->printDebugInfo(mv_correspondences_ij); std::vector<MultivaluedCorrespondence> mv_correspondences_ji; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_tgt->getInputCloud(), pcd_tree_src, tgt, src, mv_correspondences_ji, point_representation, parameters_.guess->inverse(), parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(tgt, src, mv_correspondences_ji, point_representation, this->parameters_.randomness, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(tgt, src, mv_correspondences_ji, point_representation, this->parameters_.randomness, threads); } float matching_cluster_radius = MATCHING_CLUSTER_RADIUS_COEF * this->parameters_.voxel_size; pcl::Correspondences correspondences_cluster; for (int i = 0; i < src->size(); ++i) { for (int j: mv_correspondences_ij[i].match_indices) { float distance_i = calculateCorrespondenceDistance(i, j, matching_cluster_radius, mv_correspondences_ij, pcd_tree_src, pcd_tree_tgt); float distance_j = calculateCorrespondenceDistance(j, i, matching_cluster_radius, mv_correspondences_ji, pcd_tree_tgt, pcd_tree_src); if (distance_i < MATCHING_CLUSTER_THRESHOLD && distance_j < MATCHING_CLUSTER_THRESHOLD) { correspondences_cluster.push_back({i, j, distance_i}); } } } PCL_DEBUG("[%s::match] %i correspondences remain after cluster filtering.\n", getClassName().c_str(), correspondences_cluster.size()); return correspondences_cluster; } inline std::string getClassName() override { return "ClusterFeatureMatcher"; } protected: float calculateCorrespondenceDistance(int i, int j, float radius, const std::vector<MultivaluedCorrespondence> &mv_correspondences_ij, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt) { std::unordered_set<int> i_neighbors, j_neighbors; pcl::Indices match_indices; std::vector<float> distances; pcd_tree_src->radiusSearch(i, radius, 
match_indices, distances); std::copy(match_indices.begin(), match_indices.end(), std::inserter(i_neighbors, i_neighbors.begin())); pcd_tree_tgt->radiusSearch(j, radius, match_indices, distances); std::copy(match_indices.begin(), match_indices.end(), std::inserter(j_neighbors, j_neighbors.begin())); int count_consistent_pairs = 0, count_pairs = 0; for (int i_neighbor: i_neighbors) { for (int i_neighbor_match: mv_correspondences_ij[i_neighbor].match_indices) { if (j_neighbors.contains(i_neighbor_match)) { count_consistent_pairs++; } count_pairs++; } } if (count_pairs == 0) { return 0; } return 1.f - (float) count_consistent_pairs / (float) count_pairs; } AlignmentParameters parameters_; }; template<typename FeatureT> typename FeatureMatcher<FeatureT>::Ptr getFeatureMatcher(const AlignmentParameters &parameters) { if (parameters.matching_id == MATCHING_RATIO) { return std::make_shared<RatioFeatureMatcher<FeatureT>>(parameters); } else if (parameters.matching_id == MATCHING_CLUSTER) { return std::make_shared<ClusterFeatureMatcher<FeatureT>>(parameters); } else if (parameters.matching_id != MATCHING_LEFT_TO_RIGHT) { PCL_WARN("[getFeatureMatcher] feature matcher %s isn't supported, left-to-right matcher will be used.", parameters.matching_id.c_str()); } return std::make_shared<LeftToRightFeatureMatcher<FeatureT>>(parameters); } template<typename T> class KNNResult { private: int capacity_; int count_; std::vector<int> indices_; std::vector<T> dists_; public: inline KNNResult(int capacity) : capacity_(capacity), count_(0) { indices_.reserve(capacity); dists_.reserve(capacity); } inline int size() const { return count_; } inline std::vector<int> getIndices() const { return indices_; } inline std::vector<T> getDistances() const { return dists_; } inline bool addPoint(T dist, int index) { if (count_ < capacity_) { indices_.resize(count_ + 1); dists_.resize(count_ + 1); } int i; for (i = count_; i > 0; --i) { if (dists_[i - 1] > dist) { if (i < capacity_) { dists_[i] = 
dists_[i - 1];
                    indices_[i] = indices_[i - 1];
                }
            } else {
                break;
            }
        }
        /* Insert the new point at the slot opened by the shift (dropped if
           it sorts past the capacity). */
        if (i < capacity_) {
            dists_[i] = dist;
            indices_[i] = index;
        }
        if (count_ < capacity_) {
            count_++;
        }
        return true;
    }
};

/* Wrap a contiguous window of a PCL feature cloud as a cv::Mat (one row per
   point, nr_dims float columns) and copy it into dst.
   size == 0 means "everything from offset to the end".
   NOTE(review): the cv::Mat step is sizeof(src->points[0]) — the stride of a
   whole feature struct — so rows map onto consecutive points even when the
   descriptor does not fill the struct; assumes the float descriptor lies at
   the start of the point type — TODO confirm for each FeatureT used. */
template<typename FeatureT>
void pcl2cv(int nr_dims, const typename pcl::PointCloud<FeatureT>::ConstPtr &src, cv::OutputArray &dst,
            int size = 0, int offset = 0) {
    if (src->empty()) return;
    int rows = size == 0 ? (src->size() - offset) : std::min((int) (src->size() - offset), size);
    cv::Mat _src(rows, nr_dims, CV_32FC1, (void *) &src->points[offset], sizeof(src->points[0]));
    _src.copyTo(dst);
}

/* K-nearest-neighbor feature matching via a FLANN kd-tree built over
   train_features; fills one MultivaluedCorrespondence per query point
   (invalid query points are left as the default-constructed entry).
   Parallelized over query points with OpenMP. */
template<typename FeatureT>
void matchFLANN(const typename pcl::PointCloud<FeatureT>::ConstPtr &query_features,
                const typename pcl::PointCloud<FeatureT>::ConstPtr &train_features,
                std::vector<MultivaluedCorrespondence> &mv_correspondences,
                const typename pcl::PointRepresentation<FeatureT>::Ptr point_representation,
                int k_matches, int threads) {
    /* NOTE(review): the ctor argument is a new'd tree converted to the bool
       `sorted` parameter of pcl::KdTreeFLANN — the allocation is leaked and
       only serves as `true`; presumably `pcl::KdTreeFLANN<FeatureT> feature_tree;`
       was intended — TODO confirm and fix upstream. */
    pcl::KdTreeFLANN<FeatureT> feature_tree(new pcl::KdTreeFLANN<FeatureT>);
    feature_tree.setInputCloud(train_features);
    auto n = query_features->size();
    mv_correspondences.resize(n, MultivaluedCorrespondence{});
#pragma omp parallel for num_threads(threads) default(none) shared(mv_correspondences, query_features, point_representation, feature_tree) firstprivate(n, k_matches)
    for (int i = 0; i < n; i++) {
        if (point_representation->isValid(query_features->points[i])) {
            mv_correspondences[i].query_idx = i;
            pcl::Indices &match_indices = mv_correspondences[i].match_indices;
            std::vector<float> &match_distances = mv_correspondences[i].distances;
            match_indices.resize(k_matches);
            match_distances.resize(k_matches);
            feature_tree.nearestKSearch(*query_features, i, k_matches,
                                        match_indices, match_distances);
            /* nearestKSearch reports squared distances (per PCL convention —
               verify for FeatureT); convert to Euclidean. */
            for (int j = 0; j < k_matches; ++j) {
                match_distances[j] = std::sqrt(match_distances[j]);
            }
        }
    }
}

/* Brute-force (OpenCV BFMatcher) block-wise matching; continues on the
   following lines. */
template<typename FeatureT>
void matchBF(const typename pcl::PointCloud<FeatureT>::ConstPtr &query_features,
             const typename pcl::PointCloud<FeatureT>::ConstPtr
&train_features, std::vector<MultivaluedCorrespondence> &mv_correspondences, const typename pcl::PointRepresentation<FeatureT>::Ptr point_representation, int k_matches, int nr_dims, int block_size) { auto matcher = cv::BFMatcher::create(cv::NORM_L2); std::vector<std::vector<cv::DMatch>> matches; mv_correspondences.resize(query_features->size(), MultivaluedCorrespondence{}); int n_query_blocks = (query_features->size() + block_size - 1) / block_size; for (int i = 0; i < n_query_blocks; ++i) { for (int j = 0; j < (train_features->size() + block_size - 1) / block_size; ++j) { cv::UMat query_features_batch, train_features_batch; pcl2cv<FeatureT>(nr_dims, query_features, query_features_batch, block_size, i * block_size); pcl2cv<FeatureT>(nr_dims, train_features, train_features_batch, block_size, j * block_size); matcher->knnMatch(query_features_batch, train_features_batch, matches, k_matches); for (int l = 0; l < matches.size(); ++l) { if (matches[l].empty() || matches[l][0].queryIdx == -1) { continue; } int query_idx_local = matches[l][0].queryIdx; int query_idx = i * block_size + query_idx_local; for (int m = 0; m < matches[l].size(); ++m) { if (matches[l][m].queryIdx != query_idx_local) { PCL_ERROR("[matchBF] unexpected query index in brute-force matches!"); exit(1); } updateMultivaluedCorrespondence(mv_correspondences[query_idx], query_idx, k_matches, j * block_size + matches[l][m].trainIdx, matches[l][m].distance); } } matches.clear(); } PCL_DEBUG("[matchBF] %d / % d blocks processed.\n", i + 1, n_query_blocks); } for (int i = 0; i < query_features->size(); i++) { if (!point_representation->isValid(query_features->points[i])) { mv_correspondences[i] = MultivaluedCorrespondence{}; } } } template<typename FeatureT> void matchLocal(const PointNCloud::ConstPtr &query_pcd, const typename pcl::search::KdTree<PointN>::ConstPtr &train_tree, const typename pcl::PointCloud<FeatureT>::ConstPtr &query_features, const typename pcl::PointCloud<FeatureT>::ConstPtr 
&train_features, std::vector<MultivaluedCorrespondence> &mv_correspondences, const typename pcl::PointRepresentation<FeatureT>::Ptr point_representation, const Eigen::Matrix4f &guess, float match_search_radius, int k_matches, int threads) { PointNCloud transformed_query_pcd; pcl::transformPointCloudWithNormals(*query_pcd, transformed_query_pcd, guess); auto n = transformed_query_pcd.size(); mv_correspondences.resize(query_features->size(), MultivaluedCorrespondence{}); #pragma omp parallel num_threads(threads) default(none) \ shared(transformed_query_pcd, train_tree, query_features, train_features, mv_correspondences, point_representation) \ firstprivate(n, k_matches, match_search_radius) { std::vector<float> distances; pcl::Indices indices; int nr_dims = point_representation->getNumberOfDimensions(); #pragma omp for for (int query_idx = 0; query_idx < n; ++query_idx) { if (point_representation->isValid(query_features->points[query_idx])) { KNNResult<float> knnResult(k_matches); train_tree->radiusSearch(transformed_query_pcd.points[query_idx], match_search_radius, indices, distances); for (int train_idx: indices) { if (point_representation->isValid(train_features->points[train_idx])) { float dist = pcl::L2_Norm((float *) &query_features->points[query_idx], (float *) &train_features->points[train_idx], nr_dims); knnResult.addPoint(dist, train_idx); } } if (knnResult.size() > 0) { mv_correspondences[query_idx].query_idx = query_idx; mv_correspondences[query_idx].match_indices = knnResult.getIndices(); mv_correspondences[query_idx].distances = knnResult.getDistances(); } } } } } #endif
/* ===== ep.c ===== */
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - EP This benchmark is an OpenMP C version of the NPB EP code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: P. O. Frederickson D. H. Bailey A. C. Woo OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" #include "npbparams.h" /* parameters */ #define MK 16 #define MM (M - MK) #define NN (1 << MM) #define NK (1 << MK) #define NQ 10 #define EPSILON 1.0e-8 #define A 1220703125.0 #define S 271828183.0 #define TIMERS_ENABLED FALSE /* global variables */ /* common /storage/ */ static double x[2*NK]; #pragma omp threadprivate(x) static double q[NQ]; /*-------------------------------------------------------------------- program EMBAR c-------------------------------------------------------------------*/ /* c This is the serial version of the APP Benchmark 1, c the "embarassingly parallel" benchmark. c c M is the Log_2 of the number of complex pairs of uniform (0, 1) random c numbers. MK is the Log_2 of the size of each batch of uniform random c numbers. MK can be set for convenience on a given system, since it does c not affect the results. 
*/
/*
 * EP benchmark driver: generates 2^(M+1) uniform pseudorandom numbers in
 * batches of 2*NK, converts in-disk pairs to Gaussian deviates by
 * acceptance-rejection, and verifies the sums against published reference
 * values for the benchmark class.
 */
int main(int argc, char **argv) {
    double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc;
    double dum[3] = { 1.0, 1.0, 1.0 };
    /* NOTE(review): ierr, node, no_nodes, ierrcode, no_large_nodes, np_add
       are MPI-era leftovers and are never used here. */
    int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode,
        no_large_nodes, np_add, k_offset, j;
    int nthreads = 1;
    boolean verified;
    char size[13+1];            /* character*13 */

    /*
     * The problem size 2^(M+1) can overflow a 32-bit integer for the larger
     * classes, so it is printed through a float conversion into a string and
     * the decimal point is blanked out below.
     * NOTE(review): sprintf writes 12 chars + NUL, so the j == 13 iteration
     * reads an uninitialized byte of size[] — harmless in practice but worth
     * confirming.
     */
    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
           " - EP Benchmark\n");
    sprintf(size, "%12.0f", pow(2.0, M+1));
    for (j = 13; j >= 1; j--) {
        if (size[j] == '.') size[j] = ' ';
    }
    printf(" Number of random numbers generated: %13s\n", size);
    verified = FALSE;

    /* Number of independent "batches" of random number pairs. */
    np = NN;

    /*
     * Touch the RNG, the x array and the math library once up front so that
     * first-use paging does not pollute the timings, written so the calls
     * cannot be eliminated as dead code.
     */
    vranlc(0, &(dum[0]), dum[1], &(dum[2]));
    dum[0] = randlc(&(dum[1]), dum[2]);
    for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;
    Mops = log(sqrt(fabs(max(1.0, 1.0))));

    timer_clear(1);
    timer_clear(2);
    timer_clear(3);
    timer_start(1);

    vranlc(0, &t1, A, x);

    /* Compute AN = A ^ (2 * NK) (mod 2^46) by repeated squaring via randlc. */
    t1 = A;
    for ( i = 1; i <= MK+1; i++) {
        t2 = randlc(&t1, t1);
    }
    an = t1;
    tt = S;
    gc = 0.0;
    sx = 0.0;
    sy = 0.0;
    for ( i = 0; i <= NQ - 1; i++) {
        q[i] = 0.0;
    }

    /*
     * Each batch k is independent: its starting seed is derived directly
     * from kk = k_offset + k, so iterations can be distributed freely.
     */
    k_offset = -1;

#pragma omp parallel copyin(x)
{
    double t1, t2, t3, t4, x1, x2;
    int kk, i, ik, l;
    double qq[NQ];              /* private copy of q[0:NQ-1] */

    for (i = 0; i < NQ; i++) qq[i] = 0.0;

#pragma omp for reduction(+:sx,sy) schedule(static)
    for (k = 1; k <= np; k++) {
        kk = k_offset + k;
        t1 = S;
        t2 = an;

        /* Find starting seed t1 for this kk: walk the binary expansion of
           kk, squaring (t2) and conditionally multiplying (t1) via randlc. */
        for (i = 1; i <= 100; i++) {
            ik = kk / 2;
            if (2 * ik != kk) t3 = randlc(&t1, t2);
            if (ik == 0) break;
            t3 = randlc(&t2, t2);
            kk = ik;
        }

        /* Compute uniform pseudorandom numbers.
           NOTE(review): x-1 fakes Fortran 1-based indexing inside vranlc;
           forming a pointer one before the array is technically UB in C. */
        if (TIMERS_ENABLED == TRUE) timer_start(3);
        vranlc(2*NK, &t1, A, x-1);
        if (TIMERS_ENABLED == TRUE) timer_stop(3);

        /*
         * Compute Gaussian deviates by acceptance-rejection (Marsaglia polar
         * method) and tally counts in concentric square annuli.  Not
         * vectorizable because of the rejection branch.
         */
        if (TIMERS_ENABLED == TRUE) timer_start(2);
        for ( i = 0; i < NK; i++) {
            x1 = 2.0 * x[2*i] - 1.0;
            x2 = 2.0 * x[2*i+1] - 1.0;
            t1 = pow2(x1) + pow2(x2);
            if (t1 <= 1.0) {
                t2 = sqrt(-2.0 * log(t1) / t1);
                t3 = (x1 * t2);                 /* Xi */
                t4 = (x2 * t2);                 /* Yi */
                l = max(fabs(t3), fabs(t4));    /* annulus index (double truncated to int) */
                qq[l] += 1.0;                   /* counts */
                sx = sx + t3;                   /* sum of Xi */
                sy = sy + t4;                   /* sum of Yi */
            }
        }
        if (TIMERS_ENABLED == TRUE) timer_stop(2);
    }
    /* Merge each thread's private histogram into the shared q[]. */
#pragma omp critical
    {
        for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
    }
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end of parallel region */

    for (i = 0; i <= NQ-1; i++) {
        gc = gc + q[i];
    }

    timer_stop(1);
    tm = timer_read(1);

    nit = 0;
    /* Verify the sums against the published reference values per class M. */
    if (M == 24) {
        if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
           (fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 25) {
        if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
            (fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 28) {
        {
            if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
                (fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
                verified = TRUE;
            }
            printf("Debug: 231, sx is:%f, sy is:%f\n",sx,sy);
        }
    } else if (M == 30) {
        if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
            (fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 32) {
        if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
            (fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    }

    /* Millions of random-number operations per second. */
    Mops = pow(2.0, M+1)/tm/1000000.0;

    printf("EP Benchmark Results: \n"
           "CPU Time = %10.4f\n"
           "N = 2^%5d\n"
           "No. Gaussian Pairs = %15.0f\n"
           "Sums = %25.15e %25.15e\n"
           "Counts:\n",
           tm, M, gc, sx, sy);
    for (i = 0; i <= NQ-1; i++) {
        printf("%3d %15.0f\n", i, q[i]);
    }

    c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
                    tm, Mops,
                    "Random numbers generated",
                    verified, NPBVERSION, COMPILETIME,
                    CS1, CS2, CS3, CS4, CS5, CS6, CS7);

    if (TIMERS_ENABLED == TRUE) {
        printf("Total time: %f", timer_read(1));
        printf("Gaussian pairs: %f", timer_read(2));
        printf("Random numbers: %f", timer_read(3));
    }
    /* NOTE(review): main is declared int but falls off the end without a
       return statement. */
}
/* ===== compare.c ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % John Cristy % % December 2003 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/compare.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/statistic.h" #include "magick/transform.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImageChannels() compares one or more image channels of an image % to a reconstructed image and returns the difference image. % % The format of the CompareImageChannels method is: % % Image *CompareImageChannels(const Image *image, % const Image *reconstruct_image,const ChannelType channel, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o channel: the channel. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
% */
/* Convenience wrapper around CompareImageChannels() for all channels. */
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  Image
    *highlight_image;

  highlight_image=CompareImageChannels(image,reconstruct_image,AllChannels,
    metric,distortion,exception);
  return(highlight_image);
}

/*
  Compare the selected channels of image against reconstruct_image: compute
  the distortion via the requested metric, then build and return a
  "difference" image in which differing pixels are painted with the
  highlight color and identical pixels with the lowlight color.  Returns
  NULL (with *exception set) on any failure; caller owns the returned image.
*/
MagickExport Image *CompareImageChannels(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  Image
    *difference_image,
    *highlight_image;

  ssize_t
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    highlight,
    lowlight,
    zero;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Images must be the same size for a pixel-wise comparison. */
  if ((reconstruct_image->columns != image->columns) ||
      (reconstruct_image->rows != image->rows))
    ThrowImageException(ImageError,"ImageSizeDiffers");
  status=GetImageChannelDistortion(image,reconstruct_image,channel,metric,
    distortion,exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  difference_image=CloneImage(image,0,0,MagickTrue,exception);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel);
  highlight_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&highlight_image->exception);
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel);
  /* Default colors; overridable via image artifacts. */
  (void) QueryMagickColor("#f1001ecc",&highlight,exception);
  artifact=GetImageArtifact(image,"highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&highlight,exception);
  (void) QueryMagickColor("#ffffffcc",&lowlight,exception);
  artifact=GetImageArtifact(image,"lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&lowlight,exception);
  if (highlight_image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&lowlight);
    }
  /*
    Generate difference image.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
  highlight_view=AcquireCacheView(highlight_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel,
      reconstruct_pixel;

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register IndexPacket
      *restrict highlight_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict r;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,highlight_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) ||
        (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view);
    pixel=zero;
    reconstruct_pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickStatusType
        difference;

      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
        &reconstruct_pixel);
      difference=MagickFalse;
      if (channel == AllChannels)
        {
          /* Fuzz-aware comparison across all channels. */
          if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
            difference=MagickTrue;
        }
      else
        {
          /* Per-channel comparison is exact (no fuzz). */
          if (((channel & RedChannel) != 0) && (p->red != q->red))
            difference=MagickTrue;
          if (((channel & GreenChannel) != 0) && (p->green != q->green))
            difference=MagickTrue;
          if (((channel & BlueChannel) != 0) && (p->blue != q->blue))
            difference=MagickTrue;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse) && (p->opacity != q->opacity))
            difference=MagickTrue;
          if ((((channel & IndexChannel) != 0) &&
               (image->colorspace == CMYKColorspace) &&
               (reconstruct_image->colorspace == CMYKColorspace)) &&
              (indexes[x] != reconstruct_indexes[x]))
            difference=MagickTrue;
        }
      if (difference != MagickFalse)
        SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x);
      else
        SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x);
      p++;
      q++;
      r++;
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Blend the highlight mask over the clone to form the difference image. */
  (void) CompositeImage(difference_image,image->compose,highlight_image,0,0);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l D i s t o r t i o n                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelDistortion() compares one or more
image channels of an image % to a reconstructed image and returns the specified distortion metric. % % The format of the CompareImageChannels method is: % % MagickBooleanType GetImageChannelDistortion(const Image *image, % const Image *reconstruct_image,const ChannelType channel, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o channel: the channel. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelDistortion(image,reconstruct_image,AllChannels, metric,distortion,exception); return(status); } static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel,double *distortion, ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; MagickPixelPacket zero; ssize_t y; /* Compute the absolute difference in pixels between two images. 
*/ status=MagickTrue; GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[AllChannels+1]; MagickPixelPacket pixel, reconstruct_pixel; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); pixel=zero; reconstruct_pixel=pixel; (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x, &reconstruct_pixel); if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse) { if ((channel & RedChannel) != 0) channel_distortion[RedChannel]++; if ((channel & GreenChannel) != 0) channel_distortion[GreenChannel]++; if ((channel & BlueChannel) != 0) channel_distortion[BlueChannel]++; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) channel_distortion[OpacityChannel]++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) channel_distortion[BlackChannel]++; channel_distortion[AllChannels]++; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteError) #endif for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]+=channel_distortion[i]; } 
reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/* Count the channels selected by `channel` that are meaningful for this
   image (opacity only when the image has a matte; index only for CMYK). */
static size_t GetNumberChannels(const Image *image,
  const ChannelType channel)
{
  size_t
    channels;

  channels=0;
  if ((channel & RedChannel) != 0)
    channels++;
  if ((channel & GreenChannel) != 0)
    channels++;
  if ((channel & BlueChannel) != 0)
    channels++;
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    channels++;
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    channels++;
  return(channels);
}

/* Accumulate per-channel squared QuantumScale-normalized differences
   (fuzz/RMSE-style metric); rows are processed in parallel and merged into
   distortion[] under a named critical section (continues on the following
   lines). */
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  status=MagickTrue;
  image_view=AcquireCacheView(image);
  reconstruct_view=AcquireCacheView(reconstruct_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_distortion[AllChannels+1];

    register const IndexPacket
      *restrict indexes,
      *restrict reconstruct_indexes;

    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        distance;

      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(p->red-(MagickRealType) q->red);
          channel_distortion[RedChannel]+=distance*distance;
channel_distortion[AllChannels]+=distance*distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*(p->green-(MagickRealType) q->green); channel_distortion[GreenChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*(p->blue-(MagickRealType) q->blue); channel_distortion[BlueChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) || (reconstruct_image->matte != MagickFalse))) { distance=QuantumScale*((image->matte != MagickFalse ? p->opacity : OpaqueOpacity)-(reconstruct_image->matte != MagickFalse ? q->opacity : OpaqueOpacity)); channel_distortion[OpacityChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*(indexes[x]-(MagickRealType) reconstruct_indexes[x]); channel_distortion[BlackChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]/=((double) image->columns*image->rows); if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) || (reconstruct_image->matte != MagickFalse))) distortion[AllChannels]/=(double) (GetNumberChannels(image,channel)-1); else distortion[AllChannels]/=(double) GetNumberChannels(image,channel); distortion[AllChannels]=sqrt(distortion[AllChannels]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const 
ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; register ssize_t i; ssize_t y; status=MagickTrue; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[AllChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & RedChannel) != 0) { distance=QuantumScale*fabs(p->red-(double) q->red); channel_distortion[RedChannel]+=distance; channel_distortion[AllChannels]+=distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*fabs(p->green-(double) q->green); channel_distortion[GreenChannel]+=distance; channel_distortion[AllChannels]+=distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*fabs(p->blue-(double) q->blue); channel_distortion[BlueChannel]+=distance; channel_distortion[AllChannels]+=distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*fabs(p->opacity-(double) q->opacity); channel_distortion[OpacityChannel]+=distance; channel_distortion[AllChannels]+=distance; } if (((channel & IndexChannel) != 0) && 
(image->colorspace == CMYKColorspace)) { distance=QuantumScale*fabs(indexes[x]-(double) reconstruct_indexes[x]); channel_distortion[BlackChannel]+=distance; channel_distortion[AllChannels]+=distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]/=((double) image->columns*image->rows); distortion[AllChannels]/=(double) GetNumberChannels(image,channel); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,const ChannelType channel,double *distortion, ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; MagickRealType alpha, area, beta, maximum_error, mean_error; ssize_t y; status=MagickTrue; alpha=1.0; beta=1.0; area=0.0; maximum_error=0.0; mean_error=0.0; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; break; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & OpacityChannel) != 0) { if (image->matte != MagickFalse) alpha=(MagickRealType) (QuantumScale*(GetAlphaPixelComponent(p))); if (reconstruct_image->matte != MagickFalse) 
beta=(MagickRealType) (QuantumScale*GetAlphaPixelComponent(q)); } if ((channel & RedChannel) != 0) { distance=fabs(alpha*p->red-beta*q->red); distortion[RedChannel]+=distance; distortion[AllChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if ((channel & GreenChannel) != 0) { distance=fabs(alpha*p->green-beta*q->green); distortion[GreenChannel]+=distance; distortion[AllChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if ((channel & BlueChannel) != 0) { distance=fabs(alpha*p->blue-beta*q->blue); distortion[BlueChannel]+=distance; distortion[AllChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=fabs((double) p->opacity-q->opacity); distortion[OpacityChannel]+=distance; distortion[AllChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=fabs(alpha*indexes[x]-beta*reconstruct_indexes[x]); distortion[BlackChannel]+=distance; distortion[AllChannels]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p++; q++; } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[AllChannels]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; 
MagickBooleanType status; register ssize_t i; ssize_t y; status=MagickTrue; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[AllChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & RedChannel) != 0) { distance=QuantumScale*(p->red-(MagickRealType) q->red); channel_distortion[RedChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*(p->green-(MagickRealType) q->green); channel_distortion[GreenChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*(p->blue-(MagickRealType) q->blue); channel_distortion[BlueChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*(p->opacity-(MagickRealType) q->opacity); channel_distortion[OpacityChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == 
CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*(indexes[x]-(MagickRealType) reconstruct_indexes[x]); channel_distortion[BlackChannel]+=distance*distance; channel_distortion[AllChannels]+=distance*distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]+=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]/=((double) image->columns*image->rows); distortion[AllChannels]/=(double) GetNumberChannels(image,channel); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; MagickBooleanType status; MagickOffsetType progress; MagickRealType area; register ssize_t i; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. 
*/ image_statistics=GetImageChannelStatistics(image,exception); reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception); status=MagickTrue; progress=0; for (i=0; i <= (ssize_t) AllChannels; i++) distortion[i]=0.0; area=1.0/((MagickRealType) image->columns*image->rows); image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) distortion[RedChannel]+=area*QuantumScale*(p->red- image_statistics[RedChannel].mean)*(q->red- reconstruct_statistics[RedChannel].mean); if ((channel & GreenChannel) != 0) distortion[GreenChannel]+=area*QuantumScale*(p->green- image_statistics[GreenChannel].mean)*(q->green- reconstruct_statistics[GreenChannel].mean); if ((channel & BlueChannel) != 0) distortion[BlueChannel]+=area*QuantumScale*(p->blue- image_statistics[BlueChannel].mean)*(q->blue- reconstruct_statistics[BlueChannel].mean); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[OpacityChannel]+=area*QuantumScale*(p->opacity- image_statistics[OpacityChannel].mean)*(q->opacity- reconstruct_statistics[OpacityChannel].mean); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) distortion[BlackChannel]+=area*QuantumScale*(indexes[x]- 
image_statistics[OpacityChannel].mean)*(reconstruct_indexes[x]- reconstruct_statistics[OpacityChannel].mean); p++; q++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SimilarityImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. */ for (i=0; i < (ssize_t) AllChannels; i++) { MagickRealType gamma; gamma=image_statistics[i].standard_deviation* reconstruct_statistics[i].standard_deviation; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); distortion[i]=QuantumRange*gamma*distortion[i]; } distortion[AllChannels]=0.0; if ((channel & RedChannel) != 0) distortion[AllChannels]+=distortion[RedChannel]*distortion[RedChannel]; if ((channel & GreenChannel) != 0) distortion[AllChannels]+=distortion[GreenChannel]*distortion[GreenChannel]; if ((channel & BlueChannel) != 0) distortion[AllChannels]+=distortion[BlueChannel]*distortion[BlueChannel]; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[AllChannels]+=distortion[OpacityChannel]* distortion[OpacityChannel]; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) distortion[AllChannels]+=distortion[BlackChannel]*distortion[BlackChannel]; distortion[AllChannels]=sqrt(distortion[AllChannels]/GetNumberChannels(image, channel)); /* Free resources. 
*/ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_distortion[AllChannels+1]; register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y, reconstruct_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; if ((channel & RedChannel) != 0) { distance=QuantumScale*fabs(p->red-(double) q->red); if (distance > channel_distortion[RedChannel]) channel_distortion[RedChannel]=distance; if (distance > channel_distortion[AllChannels]) channel_distortion[AllChannels]=distance; } if ((channel & GreenChannel) != 0) { distance=QuantumScale*fabs(p->green-(double) q->green); if (distance > channel_distortion[GreenChannel]) channel_distortion[GreenChannel]=distance; if (distance > channel_distortion[AllChannels]) 
channel_distortion[AllChannels]=distance; } if ((channel & BlueChannel) != 0) { distance=QuantumScale*fabs(p->blue-(double) q->blue); if (distance > channel_distortion[BlueChannel]) channel_distortion[BlueChannel]=distance; if (distance > channel_distortion[AllChannels]) channel_distortion[AllChannels]=distance; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { distance=QuantumScale*fabs(p->opacity-(double) q->opacity); if (distance > channel_distortion[OpacityChannel]) channel_distortion[OpacityChannel]=distance; if (distance > channel_distortion[AllChannels]) channel_distortion[AllChannels]=distance; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=QuantumScale*fabs(indexes[x]-(double) reconstruct_indexes[x]); if (distance > channel_distortion[BlackChannel]) channel_distortion[BlackChannel]=distance; if (distance > channel_distortion[AllChannels]) channel_distortion[AllChannels]=distance; } p++; q++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (i=0; i <= (ssize_t) AllChannels; i++) if (channel_distortion[i] > distortion[i]) distortion[i]=channel_distortion[i]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { MagickBooleanType status; status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion, exception); if ((channel & RedChannel) != 0) distortion[RedChannel]=20.0*log10((double) 1.0/sqrt( distortion[RedChannel])); if ((channel & GreenChannel) != 0) distortion[GreenChannel]=20.0*log10((double) 1.0/sqrt( distortion[GreenChannel])); if ((channel & BlueChannel) != 0) distortion[BlueChannel]=20.0*log10((double) 1.0/sqrt( 
distortion[BlueChannel])); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[OpacityChannel]=20.0*log10((double) 1.0/sqrt( distortion[OpacityChannel])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) distortion[BlackChannel]=20.0*log10((double) 1.0/sqrt( distortion[BlackChannel])); distortion[AllChannels]=20.0*log10((double) 1.0/sqrt( distortion[AllChannels])); return(status); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,const ChannelType channel, double *distortion,ExceptionInfo *exception) { MagickBooleanType status; status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion, exception); if ((channel & RedChannel) != 0) distortion[RedChannel]=sqrt(distortion[RedChannel]); if ((channel & GreenChannel) != 0) distortion[GreenChannel]=sqrt(distortion[GreenChannel]); if ((channel & BlueChannel) != 0) distortion[BlueChannel]=sqrt(distortion[BlueChannel]); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) distortion[BlackChannel]=sqrt(distortion[BlackChannel]); distortion[AllChannels]=sqrt(distortion[AllChannels]); return(status); } MagickExport MagickBooleanType GetImageChannelDistortion(Image *image, const Image *reconstruct_image,const ChannelType channel, const MetricType metric,double *distortion,ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) 
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((reconstruct_image->columns != image->columns) || (reconstruct_image->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); /* Get image distortion. */ length=AllChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_distortion,0,length* sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel, channel_distortion,exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel, channel_distortion,exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel, channel_distortion,exception); break; } case MeanErrorPerPixelMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel, channel_distortion,exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image,channel, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel,channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel, channel_distortion,exception); break; } case PeakSignalToNoiseRatioMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel, channel_distortion,exception); break; } } *distortion=channel_distortion[AllChannels]; channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D i s t o r t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelDistrortion() compares the image channels of an image to a % reconstructed image and returns the specified distortion metric for each % channel. % % The format of the CompareImageChannels method is: % % double *GetImageChannelDistortions(const Image *image, % const Image *reconstruct_image,const MetricType metric, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o exception: return any errors or warnings in this structure. % */ MagickExport double *GetImageChannelDistortions(Image *image, const Image *reconstruct_image,const MetricType metric, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((reconstruct_image->columns != image->columns) || (reconstruct_image->rows != image->rows)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ImageError,"ImageSizeDiffers","`%s'",image->filename); return((double *) NULL); } /* Get image distortion. 
*/ length=AllChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_distortion,0,length* sizeof(*channel_distortion)); status=MagickTrue; switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,AllChannels, channel_distortion,exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,AllChannels, channel_distortion,exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image,AllChannels, channel_distortion,exception); break; } case MeanErrorPerPixelMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,AllChannels, channel_distortion,exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image,AllChannels, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, AllChannels,channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image,AllChannels, channel_distortion,exception); break; } case PeakSignalToNoiseRatioMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image,AllChannels, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image,AllChannels, channel_distortion,exception); break; } } if (status == MagickFalse) { channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); return((double *) NULL); } return(channel_distortion); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e s E q u a l % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImagesEqual() measures the difference between colors at each pixel % location of two images. A value other than 0 means the colors match % exactly. Otherwise an error measure is computed by summing over all % pixels in an image the distance squared in RGB space between each image % pixel and its corresponding pixel in the reconstruct image. The error % measure is assigned to these image members: % % o mean_error_per_pixel: The mean error for any single pixel in % the image. % % o normalized_mean_error: The normalized mean quantization error for % any single pixel in the image. This distance measure is normalized to % a range between 0 and 1. It is independent of the range of red, green, % and blue values in the image. % % o normalized_maximum_error: The normalized maximum quantization % error for any single pixel in the image. This distance measure is % normalized to a range between 0 and 1. It is independent of the range % of red, green, and blue values in your image. % % A small normalized mean square error, accessed as % image->normalized_mean_error, suggests the images are very similar in % spatial layout and color. % % The format of the IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(Image *image, % const Image *reconstruct_image) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. 
% */ MagickExport MagickBooleanType IsImagesEqual(Image *image, const Image *reconstruct_image) { CacheView *image_view, *reconstruct_view; ExceptionInfo *exception; MagickBooleanType status; MagickRealType area, maximum_error, mean_error, mean_error_per_pixel; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickSignature); if ((reconstruct_image->columns != image->columns) || (reconstruct_image->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); area=0.0; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; exception=(&image->exception); image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) break; indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType distance; distance=fabs(p->red-(double) q->red); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; distance=fabs(p->green-(double) q->green); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; distance=fabs(p->blue-(double) q->blue); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; if (image->matte != MagickFalse) { 
distance=fabs(p->opacity-(double) q->opacity); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } if ((image->colorspace == CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) { distance=fabs(indexes[x]-(double) reconstruct_indexes[x]); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p++; q++; } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area); image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale* mean_error/area); image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error); status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i m i l a r i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SimilarityImage() compares the reference image of the image and returns the % best match offset. In addition, it returns a similarity image such that an % exact match location is completely white and if none of the pixels match, % black, otherwise some gray level in-between. % % The format of the SimilarityImageImage method is: % % Image *SimilarityImage(const Image *image,const Image *reference, % RectangleInfo *offset,double *similarity,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reference: find an area of the image that closely resembles this image. % % o the best match offset of the reference image within the image. % % o similarity: the computed similarity between the images. % % o exception: return any errors or warnings in this structure. 
% */ static double GetNCCDistortion(const Image *image, const Image *reconstruct_image, const ChannelStatistics *reconstruct_statistics,ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics; double distortion; MagickBooleanType status; MagickRealType area, gamma; ssize_t y; unsigned long number_channels; /* Normalize to account for variation due to lighting and exposure condition. */ image_statistics=GetImageChannelStatistics(image,exception); status=MagickTrue; distortion=0.0; area=1.0/((MagickRealType) image->columns*image->rows); image_view=AcquireCacheView(image); reconstruct_view=AcquireCacheView(reconstruct_image); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes, *restrict reconstruct_indexes; register const PixelPacket *restrict p, *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,reconstruct_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view); for (x=0; x < (ssize_t) image->columns; x++) { distortion+=area*QuantumScale*(p->red- image_statistics[RedChannel].mean)*(q->red- reconstruct_statistics[RedChannel].mean); distortion+=area*QuantumScale*(p->green- image_statistics[GreenChannel].mean)*(q->green- reconstruct_statistics[GreenChannel].mean); distortion+=area*QuantumScale*(p->blue- image_statistics[BlueChannel].mean)*(q->blue- reconstruct_statistics[BlueChannel].mean); if (image->matte != MagickFalse) distortion+=area*QuantumScale*(p->opacity- image_statistics[OpacityChannel].mean)*(q->opacity- reconstruct_statistics[OpacityChannel].mean); if ((image->colorspace == 
CMYKColorspace) && (reconstruct_image->colorspace == CMYKColorspace)) distortion+=area*QuantumScale*(indexes[x]- image_statistics[OpacityChannel].mean)*(reconstruct_indexes[x]- reconstruct_statistics[OpacityChannel].mean); p++; q++; } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. */ gamma=image_statistics[AllChannels].standard_deviation* reconstruct_statistics[AllChannels].standard_deviation; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); distortion=QuantumRange*gamma*distortion; number_channels=3; if (image->matte != MagickFalse) number_channels++; if (image->colorspace == CMYKColorspace) number_channels++; distortion=sqrt(distortion/number_channels); /* Free resources. */ image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(1.0-distortion); } static double GetSimilarityMetric(const Image *image,const Image *reference, const ChannelStatistics *reference_statistics,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { double distortion; Image *similarity_image; RectangleInfo geometry; SetGeometry(reference,&geometry); geometry.x=x_offset; geometry.y=y_offset; similarity_image=CropImage(image,&geometry,exception); if (similarity_image == (Image *) NULL) return(0.0); distortion=GetNCCDistortion(reference,similarity_image,reference_statistics, exception); similarity_image=DestroyImage(similarity_image); return(distortion); } MagickExport Image *SimilarityImage(Image *image,const Image *reference, RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *similarity_view; ChannelStatistics *reference_statistics; Image *similarity_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) 
LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); assert(offset != (RectangleInfo *) NULL); SetGeometry(reference,offset); *similarity_metric=1.0; if ((reference->columns > image->columns) || (reference->rows > image->rows)) ThrowImageException(ImageError,"ImageSizeDiffers"); similarity_image=CloneImage(image,image->columns-reference->columns+1, image->rows-reference->rows+1,MagickTrue,exception); if (similarity_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse) { InheritException(exception,&similarity_image->exception); similarity_image=DestroyImage(similarity_image); return((Image *) NULL); } /* Measure similarity of reference image against image. */ status=MagickTrue; progress=0; reference_statistics=GetImageChannelStatistics(reference,exception); similarity_view=AcquireCacheView(similarity_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++) { double similarity; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns, 1,exception); if (q == (const PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++) { similarity=GetSimilarityMetric(image,reference,reference_statistics,x,y, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SimilarityImage) #endif if (similarity < *similarity_metric) { *similarity_metric=similarity; offset->x=x; offset->y=y; } q->red=ClampToQuantum(QuantumRange-QuantumRange*similarity); q->green=q->red; q->blue=q->red; q++; } if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse) status=MagickFalse; if 
(image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SimilarityImage) #endif proceed=SetImageProgress(image,SimilarityImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } similarity_view=DestroyCacheView(similarity_view); reference_statistics=(ChannelStatistics *) RelinquishMagickMemory( reference_statistics); return(similarity_image); }
/* GB_binop__plus_uint64.c */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__plus_uint64 // A.*B function (eWiseMult): GB_AemultB__plus_uint64 // A*D function (colscale): GB_AxD__plus_uint64 // D*A function (rowscale): GB_DxB__plus_uint64 // C+=B function (dense accum): GB_Cdense_accumB__plus_uint64 // C+=b function (dense accum): GB_Cdense_accumb__plus_uint64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_uint64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_uint64 // C=scalar+B GB_bind1st__plus_uint64 // C=scalar+B' GB_bind1st_tran__plus_uint64 // C=A+scalar GB_bind2nd__plus_uint64 // C=A'+scalar GB_bind2nd_tran__plus_uint64 // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x + y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_UINT64 || GxB_NO_PLUS_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__plus_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__plus_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__plus_uint64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__plus_uint64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__plus_uint64 ( 
GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__plus_uint64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__plus_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t 
*pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__plus_uint64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__plus_uint64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): 
apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__plus_uint64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB_bind1st_tran__plus_uint64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB_bind2nd_tran__plus_uint64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
csr_matvec.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * Matvec functions for hypre_CSRMatrix class.
 *
 *****************************************************************************/

#include "seq_mv.h"
#include <assert.h>

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixMatvec
 *--------------------------------------------------------------------------*/

/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlace( HYPRE_Complex    alpha,
                                 hypre_CSRMatrix *A,
                                 hypre_Vector    *x,
                                 HYPRE_Complex    beta,
                                 hypre_Vector    *b,
                                 hypre_Vector    *y,
                                 HYPRE_Int        offset )
{
#ifdef HYPRE_PROFILE
   HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif

#if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY)
   /* CUDA */
   PUSH_RANGE_PAYLOAD("MATVEC",0, hypre_CSRMatrixNumRows(A));
#ifdef HYPRE_BIGINT
   HYPRE_Int ierr = hypre_CSRMatrixMatvecDeviceBIGINT( alpha,A,x,beta,b,y,offset );
#else
   HYPRE_Int ierr = hypre_CSRMatrixMatvecDevice( alpha,A,x,beta,b,y,offset );
#endif
   POP_RANGE;
#elif defined(HYPRE_USING_OPENMP_OFFLOAD)
   /* OMP 4.5 */
   PUSH_RANGE_PAYLOAD("MATVEC-OMP",0, hypre_CSRMatrixNumRows(A));
   HYPRE_Int ierr = hypre_CSRMatrixMatvecOutOfPlaceOOMP( alpha,A,x,beta,b,y,offset );
   POP_RANGE;
#else
   /* CPU */

   /* All row-indexed arrays are shifted by 'offset' so the loops below can
      run over [0, num_rows). */
   HYPRE_Complex    *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i = hypre_CSRMatrixI(A) + offset;
   HYPRE_Int        *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int         num_rows = hypre_CSRMatrixNumRows(A) - offset;
   HYPRE_Int         num_cols = hypre_CSRMatrixNumCols(A);

   HYPRE_Int        *A_rownnz = hypre_CSRMatrixRownnz(A);
   HYPRE_Int         num_rownnz = hypre_CSRMatrixNumRownnz(A);

   HYPRE_Complex    *x_data = hypre_VectorData(x);
   HYPRE_Complex    *b_data = hypre_VectorData(b) + offset;
   HYPRE_Complex    *y_data = hypre_VectorData(y) + offset;
   HYPRE_Int         x_size = hypre_VectorSize(x);
   HYPRE_Int         b_size = hypre_VectorSize(b) - offset;
   HYPRE_Int         y_size = hypre_VectorSize(y) - offset;
   HYPRE_Int         num_vectors = hypre_VectorNumVectors(x);
   HYPRE_Int         idxstride_y = hypre_VectorIndexStride(y);
   HYPRE_Int         vecstride_y = hypre_VectorVectorStride(y);
   HYPRE_Int         idxstride_x = hypre_VectorIndexStride(x);
   HYPRE_Int         vecstride_x = hypre_VectorVectorStride(x);

   HYPRE_Complex     temp, tempx;
   HYPRE_Int         i, j, jj, m, ierr = 0;
   HYPRE_Real        xpar = 0.7;  /* threshold for taking the rownnz path */
   hypre_Vector     *x_tmp = NULL;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  Matvec returns ierr = 1 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 2 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in Matvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
   hypre_assert( num_vectors == hypre_VectorNumVectors(b) );

   if (num_cols != x_size)
      ierr = 1;
   if (num_rows != y_size || num_rows != b_size)
      ierr = 2;
   if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
      ierr = 3;

   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/

   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows*num_vectors; i++)
         y_data[i] = beta*b_data[i];

#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
      return ierr;
   }

   /* aliased input/output: work on a deep copy of x */
   if (x == y)
   {
      x_tmp = hypre_SeqVectorCloneDeep(x);
      x_data = hypre_VectorData(x_tmp);
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y
    *-----------------------------------------------------------------------*/

   temp = beta / alpha;

   /* use rownnz pointer to do the A*x multiplication
      when num_rownnz is smaller than num_rows */

   if (num_rownnz < xpar*(num_rows) || num_vectors > 1)
   {
      /*--------------------------------------------------------------------
       * y = (beta/alpha)*y
       *--------------------------------------------------------------------*/

      if (temp != 1.0)
      {
         if (temp == 0.0)
         {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < num_rows*num_vectors; i++)
               y_data[i] = 0.0;
         }
         else
         {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
            for (i = 0; i < num_rows*num_vectors; i++)
               y_data[i] = b_data[i]*temp;
         }
      }
      else
      {
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] = b_data[i];
      }

      /*-----------------------------------------------------------------
       * y += A*x
       *-----------------------------------------------------------------*/

      if (num_rownnz < xpar*(num_rows))
      {
         /* visit only the rows listed in A_rownnz */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rownnz; i++)
         {
            m = A_rownnz[i];

            if ( num_vectors==1 )
            {
               tempx = 0;
               for (jj = A_i[m]; jj < A_i[m+1]; jj++)
                  tempx += A_data[jj] * x_data[A_j[jj]];
               y_data[m] += tempx;
            }
            else
            {
               for ( j=0; j<num_vectors; ++j )
               {
                  tempx = 0;
                  for (jj = A_i[m]; jj < A_i[m+1]; jj++)
                     tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
                  y_data[ j*vecstride_y + m*idxstride_y] += tempx;
               }
            }
         }
      }
      else // num_vectors > 1
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows; i++)
         {
            for (j = 0; j < num_vectors; ++j)
            {
               tempx = 0;
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               {
                  tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
               }
               y_data[ j*vecstride_y + i*idxstride_y ] += tempx;
            }
         }
      }

      /*-----------------------------------------------------------------
       * y = alpha*y
       *-----------------------------------------------------------------*/

      if (alpha != 1.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows*num_vectors; i++)
            y_data[i] *= alpha;
      }
   }
   else
   {
      // JSP: this is currently the only path optimized
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,tempx)
#endif
      {
         HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
         HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
         hypre_assert(iBegin <= iEnd);
         hypre_assert(iBegin >= 0 && iBegin <= num_rows);
         hypre_assert(iEnd >= 0 && iEnd <= num_rows);

         /* specialize on temp = beta/alpha and on alpha to avoid
            per-element multiplies in the common cases */
         if (0 == temp)
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = 0.0;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*A*x
         } // temp == 0
         else if (-1 == temp) // beta == -alpha
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x - y
            else if (-1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x + y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x - y)
         } // temp == -1
         else if (1 == temp)
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x + y
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x - y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i];
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x + y)
         }
         else
         {
            if (1 == alpha) // JSP: a common path
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = A*x + temp*y
            else if (-1 == alpha)
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = -b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx -= A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = tempx;
               }
            } // y = -A*x - temp*y
            else
            {
               for (i = iBegin; i < iEnd; i++)
               {
                  tempx = b_data[i]*temp;
                  for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  {
                     tempx += A_data[jj] * x_data[A_j[jj]];
                  }
                  y_data[i] = alpha*tempx;
               }
            } // y = alpha*(A*x + temp*y)
         } // temp != 0 && temp != -1 && temp != 1
      } // omp parallel
   }

   if (x == y) hypre_SeqVectorDestroy(x_tmp);

#endif /* CPU */

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
   return ierr;
}

HYPRE_Int
hypre_CSRMatrixMatvec( HYPRE_Complex    alpha,
                       hypre_CSRMatrix *A,
                       hypre_Vector    *x,
                       HYPRE_Complex    beta,
                       hypre_Vector    *y )
{
   /* in-place variant: b and y are the same vector */
   return hypre_CSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y, 0);
}

#if defined (HYPRE_USING_UNIFIED_MEMORY)
HYPRE_Int
hypre_CSRMatrixMatvec3( HYPRE_Complex    alpha,
                        hypre_CSRMatrix *A,
                        hypre_Vector    *x,
                        HYPRE_Complex    beta,
                        hypre_Vector    *y )
{
   return hypre_CSRMatrixMatvecOutOfPlaceOOMP3(alpha, A, x, beta, y, y, 0);
}
#endif

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixMatvecT
 *
 * This version is using a different (more efficient) threading scheme
 * Performs y <- alpha * A^T * x + beta * y
 *
 * From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixMatvecT( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int x_size = hypre_VectorSize(x); HYPRE_Int y_size = hypre_VectorSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x); HYPRE_Int idxstride_y = hypre_VectorIndexStride(y); HYPRE_Int vecstride_y = hypre_VectorVectorStride(y); HYPRE_Int idxstride_x = hypre_VectorIndexStride(x); HYPRE_Int vecstride_x = hypre_VectorVectorStride(x); HYPRE_Complex temp; HYPRE_Complex *y_data_expand; HYPRE_Int my_thread_num = 0, offset = 0; HYPRE_Int i, j, jv, jj; HYPRE_Int num_threads; HYPRE_Int ierr = 0; hypre_Vector *x_tmp = NULL; /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ hypre_assert( num_vectors == hypre_VectorNumVectors(y) ); if (num_rows != x_size) ierr = 1; if (num_cols != y_size) ierr = 2; if (num_rows != x_size && num_cols != y_size) ierr = 3; /*----------------------------------------------------------------------- * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS *-----------------------------------------------------------------------*/ if (alpha == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols*num_vectors; i++) y_data[i] *= beta; return ierr; } if (x == y) { x_tmp = hypre_SeqVectorCloneDeep(x); x_data = hypre_VectorData(x_tmp); } /*----------------------------------------------------------------------- * y = (beta/alpha)*y *-----------------------------------------------------------------------*/ temp = beta / alpha; if (temp != 1.0) { if (temp == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols*num_vectors; i++) y_data[i] = 0.0; } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols*num_vectors; i++) y_data[i] *= temp; } } /*----------------------------------------------------------------- * y += A^T*x *-----------------------------------------------------------------*/ num_threads = hypre_NumThreads(); if (num_threads > 1) { y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads*y_size, HYPRE_MEMORY_HOST); if ( num_vectors==1 ) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,jj,j,my_thread_num,offset) #endif { my_thread_num = hypre_GetThreadNum(); offset = y_size*my_thread_num; #ifdef HYPRE_USING_OPENMP #pragma omp for HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { for (jj = A_i[i]; jj < A_i[i+1]; jj++) { j = A_j[jj]; y_data_expand[offset + j] += A_data[jj] * x_data[i]; } } /* implied barrier (for threads)*/ 
#ifdef HYPRE_USING_OPENMP #pragma omp for HYPRE_SMP_SCHEDULE #endif for (i = 0; i < y_size; i++) { for (j = 0; j < num_threads; j++) { y_data[i] += y_data_expand[j*y_size + i]; } } } /* end parallel threaded region */ } else { /* multiple vector case is not threaded */ for (i = 0; i < num_rows; i++) { for ( jv=0; jv<num_vectors; ++jv ) { for (jj = A_i[i]; jj < A_i[i+1]; jj++) { j = A_j[jj]; y_data[ j*idxstride_y + jv*vecstride_y ] += A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x]; } } } } hypre_TFree(y_data_expand, HYPRE_MEMORY_HOST); } else { for (i = 0; i < num_rows; i++) { if ( num_vectors==1 ) { for (jj = A_i[i]; jj < A_i[i+1]; jj++) { j = A_j[jj]; y_data[j] += A_data[jj] * x_data[i]; } } else { for ( jv=0; jv<num_vectors; ++jv ) { for (jj = A_i[i]; jj < A_i[i+1]; jj++) { j = A_j[jj]; y_data[ j*idxstride_y + jv*vecstride_y ] += A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ]; } } } } } /*----------------------------------------------------------------- * y = alpha*y *-----------------------------------------------------------------*/ if (alpha != 1.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols*num_vectors; i++) y_data[i] *= alpha; } if (x == y) hypre_SeqVectorDestroy(x_tmp); return ierr; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y, HYPRE_Int *CF_marker_x, HYPRE_Int *CF_marker_y, HYPRE_Int fpt ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int 
x_size = hypre_VectorSize(x); HYPRE_Int y_size = hypre_VectorSize(y); HYPRE_Complex temp; HYPRE_Int i, jj; HYPRE_Int ierr = 0; /*--------------------------------------------------------------------- * Check for size compatibility. Matvec returns ierr = 1 if * length of X doesn't equal the number of columns of A, * ierr = 2 if the length of Y doesn't equal the number of rows * of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in Matvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ if (num_cols != x_size) ierr = 1; if (num_rows != y_size) ierr = 2; if (num_cols != x_size && num_rows != y_size) ierr = 3; /*----------------------------------------------------------------------- * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS *-----------------------------------------------------------------------*/ if (alpha == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) if (CF_marker_x[i] == fpt) y_data[i] *= beta; return ierr; } /*----------------------------------------------------------------------- * y = (beta/alpha)*y *-----------------------------------------------------------------------*/ temp = beta / alpha; if (temp != 1.0) { if (temp == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) if (CF_marker_x[i] == fpt) y_data[i] = 0.0; } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) if (CF_marker_x[i] == fpt) y_data[i] *= temp; } } /*----------------------------------------------------------------- * y += A*x *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; 
i++) { if (CF_marker_x[i] == fpt) { temp = y_data[i]; for (jj = A_i[i]; jj < A_i[i+1]; jj++) if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]]; y_data[i] = temp; } } /*----------------------------------------------------------------- * y = alpha*y *-----------------------------------------------------------------*/ if (alpha != 1.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) if (CF_marker_x[i] == fpt) y_data[i] *= alpha; } return ierr; } #if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY) HYPRE_Int hypre_CSRMatrixMatvecDevice( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *b, hypre_Vector *y, HYPRE_Int offset ) { #ifdef HYPRE_BIGINT hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR: hypre_CSRMatvecDevice should not be called when bigint is enabled!"); #else static cusparseHandle_t handle; static cusparseMatDescr_t descr; static HYPRE_Int FirstCall=1; cusparseStatus_t status; static cudaStream_t s[10]; static HYPRE_Int myid; if (b!=y){ PUSH_RANGE_PAYLOAD("MEMCPY",1,y->size-offset); VecCopy(y->data,b->data,(y->size-offset),HYPRE_STREAM(4)); POP_RANGE } if (x==y) hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n"); if (FirstCall){ PUSH_RANGE("FIRST_CALL",4); handle=getCusparseHandle(); status= cusparseCreateMatDescr(&descr); if (status != CUSPARSE_STATUS_SUCCESS) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR:: Matrix descriptor initialization failed\n"); return hypre_error_flag; } cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO); FirstCall=0; hypre_int jj; for(jj=0;jj<5;jj++) s[jj]=HYPRE_STREAM(jj); nvtxNameCudaStreamA(s[4], "HYPRE_COMPUTE_STREAM"); hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid ); myid++; POP_RANGE; } PUSH_RANGE("PREFETCH+SPMV",2); hypre_CSRMatrixPrefetchToDevice(A); 
hypre_SeqVectorPrefetchToDevice(x); hypre_SeqVectorPrefetchToDevice(y); //if (offset!=0) hypre_printf("WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice :: \n"); cusparseErrchk(cusparseDcsrmv(handle , CUSPARSE_OPERATION_NON_TRANSPOSE, A->num_rows-offset, A->num_cols, A->num_nonzeros, &alpha, descr, A->data ,A->i+offset,A->j, x->data, &beta, y->data+offset)); if (!GetAsyncMode()){ hypre_CheckErrorDevice(cudaStreamSynchronize(s[4])); } POP_RANGE; #endif return hypre_error_flag; } HYPRE_Int hypre_CSRMatrixMatvecDeviceBIGINT( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *b, hypre_Vector *y, HYPRE_Int offset ) { #ifdef HYPRE_BIGINT static cusparseHandle_t handle; static cusparseMatDescr_t descr; static HYPRE_Int FirstCall=1; cusparseStatus_t status; static cudaStream_t s[10]; static HYPRE_Int myid; if (b!=y){ PUSH_RANGE_PAYLOAD("MEMCPY",1,y->size-offset); VecCopy(y->data,b->data,(y->size-offset),HYPRE_STREAM(4)); POP_RANGE } if (x==y) fprintf(stderr,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n"); if (FirstCall){ PUSH_RANGE("FIRST_CALL",4); handle=getCusparseHandle(); status= cusparseCreateMatDescr(&descr); if (status != CUSPARSE_STATUS_SUCCESS) { printf("ERROR:: Matrix descriptor initialization failed\n"); exit(2); } cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO); FirstCall=0; hypre_int jj; for(jj=0;jj<5;jj++) s[jj]=HYPRE_STREAM(jj); nvtxNameCudaStreamA(s[4], "HYPRE_COMPUTE_STREAM"); hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid ); myid++; POP_RANGE; } PUSH_RANGE("PREFETCH+SPMV",2); hypre_int num_rows = hypre_CSRMatrixNumRows(A); hypre_int num_cols = hypre_CSRMatrixNumCols(A); hypre_int num_nonzeros = hypre_CSRMatrixNumNonzeros(A); if (A->i_short==NULL) { A->i_short = hypre_CTAlloc(hypre_int, num_rows + 1, HYPRE_MEMORY_SHARED); A->j_short = hypre_CTAlloc(hypre_int, num_nonzeros, HYPRE_MEMORY_SHARED); 
hypre_CSRMatrixPrefetchToDevice(A); hypre_CSRMatrixPrefetchToDeviceBIGINT(A); BigToSmallCopy(A->i_short,A->i,num_rows+1,0); BigToSmallCopy(A->j_short,A->j,num_nonzeros,0); hypre_CheckErrorDevice(cudaStreamSynchronize(0)); //hypre_printf("BIGINT MOD :: Arrays copied \n"); } //hypre_CSRMatrixPrefetchToDevice(A); hypre_SeqVectorPrefetchToDevice(x); hypre_SeqVectorPrefetchToDevice(y); if (offset!=0) hypre_error_w_msg(HYPRE_ERROR_GENERIC, "WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice \n"); cusparseErrchk(cusparseDcsrmv(handle , CUSPARSE_OPERATION_NON_TRANSPOSE, num_rows-offset, num_cols, num_nonzeros, &alpha, descr, A->data ,A->i_short+offset,A->j_short, x->data, &beta, y->data+offset)); if (!GetAsyncMode()){ hypre_CheckErrorDevice(cudaStreamSynchronize(s[4])); } POP_RANGE; #endif return 0; } #endif
/* ===================== bondfree.c (GROMACS bonded-force kernels) ===================== */
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 1991-2000, University of Groningen, The Netherlands. * Copyright (c) 2001-2004, The GROMACS development team, * check out http://www.gromacs.org for more information. * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. 
*/ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <math.h> #include <assert.h> #include "physics.h" #include "vec.h" #include "maths.h" #include "txtdump.h" #include "bondf.h" #include "smalloc.h" #include "pbc.h" #include "ns.h" #include "macros.h" #include "names.h" #include "gmx_fatal.h" #include "mshift.h" #include "main.h" #include "disre.h" #include "orires.h" #include "force.h" #include "nonbonded.h" /* Include the SIMD macro file and then check for support */ #include "gmx_simd_macros.h" #if defined GMX_HAVE_SIMD_MACROS && defined GMX_SIMD_HAVE_TRIGONOMETRIC #define SIMD_BONDEDS #include "gmx_simd_vec.h" #endif /* Find a better place for this? */ const int cmap_coeff_matrix[] = { 1, 0, -3, 2, 0, 0, 0, 0, -3, 0, 9, -6, 2, 0, -6, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, -9, 6, -2, 0, 6, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, -6, 0, 0, -6, 4, 0, 0, 3, -2, 0, 0, 0, 0, 0, 0, -9, 6, 0, 0, 6, -4, 0, 0, 0, 0, 1, 0, -3, 2, -2, 0, 6, -4, 1, 0, -3, 2, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 3, -2, 1, 0, -3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 2, 0, 0, 3, -2, 0, 0, 0, 0, 0, 0, 3, -2, 0, 0, -6, 4, 0, 0, 3, -2, 0, 1, -2, 1, 0, 0, 0, 0, 0, -3, 6, -3, 0, 2, -4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, -6, 3, 0, -2, 4, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, 2, -2, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, 3, -3, 0, 0, -2, 2, 0, 0, 0, 0, 0, 1, -2, 1, 0, -2, 4, -2, 0, 1, -2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 2, -1, 0, 1, -2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0, -1, 1, 0, 0, 2, -2, 0, 0, -1, 1 }; int glatnr(int *global_atom_index, int i) { int atnr; if (global_atom_index == NULL) { atnr = i + 1; } else { atnr = global_atom_index[i] + 1; } return atnr; } static int pbc_rvec_sub(const t_pbc *pbc, const rvec xi, const rvec xj, rvec dx) { if (pbc) { return pbc_dx_aiuc(pbc, xi, xj, dx); } else { rvec_sub(xi, xj, dx); return CENTRAL; } } #ifdef SIMD_BONDEDS /* SIMD PBC data structure, containing 1/boxdiag and the box vectors */ typedef struct { gmx_mm_pr inv_bzz; 
gmx_mm_pr inv_byy; gmx_mm_pr inv_bxx; gmx_mm_pr bzx; gmx_mm_pr bzy; gmx_mm_pr bzz; gmx_mm_pr byx; gmx_mm_pr byy; gmx_mm_pr bxx; } pbc_simd_t; /* Set the SIMD pbc data from a normal t_pbc struct */ static void set_pbc_simd(const t_pbc *pbc, pbc_simd_t *pbc_simd) { rvec inv_bdiag; int d; /* Setting inv_bdiag to 0 effectively turns off PBC */ clear_rvec(inv_bdiag); if (pbc != NULL) { for (d = 0; d < pbc->ndim_ePBC; d++) { inv_bdiag[d] = 1.0/pbc->box[d][d]; } } pbc_simd->inv_bzz = gmx_set1_pr(inv_bdiag[ZZ]); pbc_simd->inv_byy = gmx_set1_pr(inv_bdiag[YY]); pbc_simd->inv_bxx = gmx_set1_pr(inv_bdiag[XX]); if (pbc != NULL) { pbc_simd->bzx = gmx_set1_pr(pbc->box[ZZ][XX]); pbc_simd->bzy = gmx_set1_pr(pbc->box[ZZ][YY]); pbc_simd->bzz = gmx_set1_pr(pbc->box[ZZ][ZZ]); pbc_simd->byx = gmx_set1_pr(pbc->box[YY][XX]); pbc_simd->byy = gmx_set1_pr(pbc->box[YY][YY]); pbc_simd->bxx = gmx_set1_pr(pbc->box[XX][XX]); } else { pbc_simd->bzx = gmx_setzero_pr(); pbc_simd->bzy = gmx_setzero_pr(); pbc_simd->bzz = gmx_setzero_pr(); pbc_simd->byx = gmx_setzero_pr(); pbc_simd->byy = gmx_setzero_pr(); pbc_simd->bxx = gmx_setzero_pr(); } } /* Correct distance vector *dx,*dy,*dz for PBC using SIMD */ static gmx_inline void pbc_dx_simd(gmx_mm_pr *dx, gmx_mm_pr *dy, gmx_mm_pr *dz, const pbc_simd_t *pbc) { gmx_mm_pr sh; sh = gmx_round_pr(gmx_mul_pr(*dz, pbc->inv_bzz)); *dx = gmx_nmsub_pr(sh, pbc->bzx, *dx); *dy = gmx_nmsub_pr(sh, pbc->bzy, *dy); *dz = gmx_nmsub_pr(sh, pbc->bzz, *dz); sh = gmx_round_pr(gmx_mul_pr(*dy, pbc->inv_byy)); *dx = gmx_nmsub_pr(sh, pbc->byx, *dx); *dy = gmx_nmsub_pr(sh, pbc->byy, *dy); sh = gmx_round_pr(gmx_mul_pr(*dx, pbc->inv_bxx)); *dx = gmx_nmsub_pr(sh, pbc->bxx, *dx); } #endif /* SIMD_BONDEDS */ /* * Morse potential bond by Frank Everdij * * Three parameters needed: * * b0 = equilibrium distance in nm * be = beta in nm^-1 (actually, it's nu_e*Sqrt(2*pi*pi*mu/D_e)) * cb = well depth in kJ/mol * * Note: the potential is referenced to be +cb at infinite separation * and zero 
at the equilibrium distance!
 */
real morse_bonds(int nbonds,
                 const t_iatom forceatoms[], const t_iparams forceparams[],
                 const rvec x[], rvec f[], rvec fshift[],
                 const t_pbc *pbc, const t_graph *g,
                 real lambda, real *dvdlambda,
                 const t_mdatoms *md, t_fcdata *fcd,
                 int *global_atom_index)
{
    const real one = 1.0;
    const real two = 2.0;
    real       dr, dr2, temp, omtemp, cbomtemp, fbond, vbond, fij, vtot;
    real       b0, be, cb, b0A, beA, cbA, b0B, beB, cbB, L1;
    rvec       dx;
    int        i, m, ki, type, ai, aj;
    ivec       dt;

    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];

        /* A- and B-state parameters, interpolated linearly in lambda */
        b0A  = forceparams[type].morse.b0A;
        beA  = forceparams[type].morse.betaA;
        cbA  = forceparams[type].morse.cbA;

        b0B  = forceparams[type].morse.b0B;
        beB  = forceparams[type].morse.betaB;
        cbB  = forceparams[type].morse.cbB;

        L1 = one-lambda;                            /* 1 */
        b0 = L1*b0A + lambda*b0B;                   /* 3 */
        be = L1*beA + lambda*beB;                   /* 3 */
        cb = L1*cbA + lambda*cbB;                   /* 3 */

        ki   = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /*   3          */
        dr2  = iprod(dx, dx);                       /*   5          */
        dr   = dr2*gmx_invsqrt(dr2);                /*  10          */
        temp = exp(-be*(dr-b0));                    /*  12          */

        if (temp == one)
        {
            /* bonds are constrained. This may _not_ include bond constraints
             * if they are lambda dependent.
             */
            *dvdlambda += cbB-cbA;
            continue;
        }

        omtemp    = one-temp;                                      /*  1 */
        cbomtemp  = cb*omtemp;                                     /*  1 */
        vbond     = cbomtemp*omtemp;                               /*  1 */
        fbond     = -two*be*temp*cbomtemp*gmx_invsqrt(dr2);        /*  9 */
        vtot     += vbond;                                         /*  1 */
        *dvdlambda += (cbB - cbA) * omtemp * omtemp
            - (2-2*omtemp)*omtemp * cb * ((b0B-b0A)*be - (beB-beA)*(dr-b0)); /* 15 */
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++)                                /* 15 */
        {
            fij                 = fbond*dx[m];
            f[ai][m]           += fij;
            f[aj][m]           -= fij;
            fshift[ki][m]      += fij;
            fshift[CENTRAL][m] -= fij;
        }
    }                                           /*  83 TOTAL    */
    return vtot;
}

/* Cubic bond potential: V = kb*(r - b0)^2 + kb*kcub*(r - b0)^3
 * (no lambda dependence); pairs at exactly zero distance are skipped.
 */
real cubic_bonds(int nbonds,
                 const t_iatom forceatoms[], const t_iparams forceparams[],
                 const rvec x[], rvec f[], rvec fshift[],
                 const t_pbc *pbc, const t_graph *g,
                 real lambda, real *dvdlambda,
                 const t_mdatoms *md, t_fcdata *fcd,
                 int *global_atom_index)
{
    const real three = 3.0;
    const real two   = 2.0;
    real       kb, b0, kcub;
    real       dr, dr2, dist, kdist, kdist2, fbond, vbond, fij, vtot;
    rvec       dx;
    int        i, m, ki, type, ai, aj;
    ivec       dt;

    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];

        b0   = forceparams[type].cubic.b0;
        kb   = forceparams[type].cubic.kb;
        kcub = forceparams[type].cubic.kcub;

        ki   = pbc_rvec_sub(pbc, x[ai], x[aj], dx);     /*   3          */
        dr2  = iprod(dx, dx);                           /*   5          */

        if (dr2 == 0.0)
        {
            continue;
        }

        dr     = dr2*gmx_invsqrt(dr2);                  /*  10          */
        dist   = dr-b0;
        kdist  = kb*dist;
        kdist2 = kdist*dist;

        vbond  = kdist2 + kcub*kdist2*dist;
        fbond  = -(two*kdist + three*kdist2*kcub)/dr;

        vtot  += vbond;                                 /*  21          */

        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
            ki = IVEC2IS(dt);
        }
        for (m = 0; (m < DIM); m++)                     /*  15          */
        {
            fij                 = fbond*dx[m];
            f[ai][m]           += fij;
            f[aj][m]           -= fij;
            fshift[ki][m]      += fij;
            fshift[CENTRAL][m] -= fij;
        }
    }                                                   /*  54 TOTAL    */
    return vtot;
}

/* FENE (finite extensible nonlinear elastic) bond */
real FENE_bonds(int nbonds,
                const t_iatom forceatoms[], const t_iparams forceparams[],
                const rvec x[], rvec f[], rvec fshift[],
                const t_pbc *pbc, const
t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { const real half = 0.5; const real one = 1.0; real bm, kb; real dr, dr2, bm2, omdr2obm2, fbond, vbond, fij, vtot; rvec dx; int i, m, ki, type, ai, aj; ivec dt; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; bm = forceparams[type].fene.bm; kb = forceparams[type].fene.kb; ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */ dr2 = iprod(dx, dx); /* 5 */ if (dr2 == 0.0) { continue; } bm2 = bm*bm; if (dr2 >= bm2) { gmx_fatal(FARGS, "r^2 (%f) >= bm^2 (%f) in FENE bond between atoms %d and %d", dr2, bm2, glatnr(global_atom_index, ai), glatnr(global_atom_index, aj)); } omdr2obm2 = one - dr2/bm2; vbond = -half*kb*bm2*log(omdr2obm2); fbond = -kb/omdr2obm2; vtot += vbond; /* 35 */ if (g) { ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt); ki = IVEC2IS(dt); } for (m = 0; (m < DIM); m++) /* 15 */ { fij = fbond*dx[m]; f[ai][m] += fij; f[aj][m] -= fij; fshift[ki][m] += fij; fshift[CENTRAL][m] -= fij; } } /* 58 TOTAL */ return vtot; } real harmonic(real kA, real kB, real xA, real xB, real x, real lambda, real *V, real *F) { const real half = 0.5; real L1, kk, x0, dx, dx2; real v, f, dvdlambda; L1 = 1.0-lambda; kk = L1*kA+lambda*kB; x0 = L1*xA+lambda*xB; dx = x-x0; dx2 = dx*dx; f = -kk*dx; v = half*kk*dx2; dvdlambda = half*(kB-kA)*dx2 + (xA-xB)*kk*dx; *F = f; *V = v; return dvdlambda; /* That was 19 flops */ } real bonds(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, m, ki, ai, aj, type; real dr, dr2, fbond, vbond, fij, vtot; rvec dx; ivec dt; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */ dr2 = iprod(dx, dx); /* 
5 */ dr = dr2*gmx_invsqrt(dr2); /* 10 */ *dvdlambda += harmonic(forceparams[type].harmonic.krA, forceparams[type].harmonic.krB, forceparams[type].harmonic.rA, forceparams[type].harmonic.rB, dr, lambda, &vbond, &fbond); /* 19 */ if (dr2 == 0.0) { continue; } vtot += vbond; /* 1*/ fbond *= gmx_invsqrt(dr2); /* 6 */ #ifdef DEBUG if (debug) { fprintf(debug, "BONDS: dr = %10g vbond = %10g fbond = %10g\n", dr, vbond, fbond); } #endif if (g) { ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt); ki = IVEC2IS(dt); } for (m = 0; (m < DIM); m++) /* 15 */ { fij = fbond*dx[m]; f[ai][m] += fij; f[aj][m] -= fij; fshift[ki][m] += fij; fshift[CENTRAL][m] -= fij; } } /* 59 TOTAL */ return vtot; } real restraint_bonds(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, m, ki, ai, aj, type; real dr, dr2, fbond, vbond, fij, vtot; real L1; real low, dlow, up1, dup1, up2, dup2, k, dk; real drh, drh2; rvec dx; ivec dt; L1 = 1.0 - lambda; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */ dr2 = iprod(dx, dx); /* 5 */ dr = dr2*gmx_invsqrt(dr2); /* 10 */ low = L1*forceparams[type].restraint.lowA + lambda*forceparams[type].restraint.lowB; dlow = -forceparams[type].restraint.lowA + forceparams[type].restraint.lowB; up1 = L1*forceparams[type].restraint.up1A + lambda*forceparams[type].restraint.up1B; dup1 = -forceparams[type].restraint.up1A + forceparams[type].restraint.up1B; up2 = L1*forceparams[type].restraint.up2A + lambda*forceparams[type].restraint.up2B; dup2 = -forceparams[type].restraint.up2A + forceparams[type].restraint.up2B; k = L1*forceparams[type].restraint.kA + lambda*forceparams[type].restraint.kB; dk = -forceparams[type].restraint.kA + forceparams[type].restraint.kB; /* 24 */ if (dr 
< low) { drh = dr - low; drh2 = drh*drh; vbond = 0.5*k*drh2; fbond = -k*drh; *dvdlambda += 0.5*dk*drh2 - k*dlow*drh; } /* 11 */ else if (dr <= up1) { vbond = 0; fbond = 0; } else if (dr <= up2) { drh = dr - up1; drh2 = drh*drh; vbond = 0.5*k*drh2; fbond = -k*drh; *dvdlambda += 0.5*dk*drh2 - k*dup1*drh; } /* 11 */ else { drh = dr - up2; vbond = k*(up2 - up1)*(0.5*(up2 - up1) + drh); fbond = -k*(up2 - up1); *dvdlambda += dk*(up2 - up1)*(0.5*(up2 - up1) + drh) + k*(dup2 - dup1)*(up2 - up1 + drh) - k*(up2 - up1)*dup2; } if (dr2 == 0.0) { continue; } vtot += vbond; /* 1*/ fbond *= gmx_invsqrt(dr2); /* 6 */ #ifdef DEBUG if (debug) { fprintf(debug, "BONDS: dr = %10g vbond = %10g fbond = %10g\n", dr, vbond, fbond); } #endif if (g) { ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt); ki = IVEC2IS(dt); } for (m = 0; (m < DIM); m++) /* 15 */ { fij = fbond*dx[m]; f[ai][m] += fij; f[aj][m] -= fij; fshift[ki][m] += fij; fshift[CENTRAL][m] -= fij; } } /* 59 TOTAL */ return vtot; } real polarize(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, m, ki, ai, aj, type; real dr, dr2, fbond, vbond, fij, vtot, ksh; rvec dx; ivec dt; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ksh = sqr(md->chargeA[aj])*ONE_4PI_EPS0/forceparams[type].polarize.alpha; if (debug) { fprintf(debug, "POL: local ai = %d aj = %d ksh = %.3f\n", ai, aj, ksh); } ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */ dr2 = iprod(dx, dx); /* 5 */ dr = dr2*gmx_invsqrt(dr2); /* 10 */ *dvdlambda += harmonic(ksh, ksh, 0, 0, dr, lambda, &vbond, &fbond); /* 19 */ if (dr2 == 0.0) { continue; } vtot += vbond; /* 1*/ fbond *= gmx_invsqrt(dr2); /* 6 */ if (g) { ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt); ki = IVEC2IS(dt); } for (m = 0; (m < DIM); m++) /* 15 */ { fij 
= fbond*dx[m]; f[ai][m] += fij; f[aj][m] -= fij; fshift[ki][m] += fij; fshift[CENTRAL][m] -= fij; } } /* 59 TOTAL */ return vtot; } real anharm_polarize(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, m, ki, ai, aj, type; real dr, dr2, fbond, vbond, fij, vtot, ksh, khyp, drcut, ddr, ddr3; rvec dx; ivec dt; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ksh = sqr(md->chargeA[aj])*ONE_4PI_EPS0/forceparams[type].anharm_polarize.alpha; /* 7*/ khyp = forceparams[type].anharm_polarize.khyp; drcut = forceparams[type].anharm_polarize.drcut; if (debug) { fprintf(debug, "POL: local ai = %d aj = %d ksh = %.3f\n", ai, aj, ksh); } ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */ dr2 = iprod(dx, dx); /* 5 */ dr = dr2*gmx_invsqrt(dr2); /* 10 */ *dvdlambda += harmonic(ksh, ksh, 0, 0, dr, lambda, &vbond, &fbond); /* 19 */ if (dr2 == 0.0) { continue; } if (dr > drcut) { ddr = dr-drcut; ddr3 = ddr*ddr*ddr; vbond += khyp*ddr*ddr3; fbond -= 4*khyp*ddr3; } fbond *= gmx_invsqrt(dr2); /* 6 */ vtot += vbond; /* 1*/ if (g) { ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt); ki = IVEC2IS(dt); } for (m = 0; (m < DIM); m++) /* 15 */ { fij = fbond*dx[m]; f[ai][m] += fij; f[aj][m] -= fij; fshift[ki][m] += fij; fshift[CENTRAL][m] -= fij; } } /* 72 TOTAL */ return vtot; } real water_pol(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { /* This routine implements anisotropic polarizibility for water, through * a shell connected to a dummy with spring constant that differ in the * three spatial dimensions in the molecular frame. 
*/ int i, m, aO, aH1, aH2, aD, aS, type, type0; rvec dOH1, dOH2, dHH, dOD, dDS, nW, kk, dx, kdx, proj; #ifdef DEBUG rvec df; #endif real vtot, fij, r_HH, r_OD, r_nW, tx, ty, tz, qS; vtot = 0.0; if (nbonds > 0) { type0 = forceatoms[0]; aS = forceatoms[5]; qS = md->chargeA[aS]; kk[XX] = sqr(qS)*ONE_4PI_EPS0/forceparams[type0].wpol.al_x; kk[YY] = sqr(qS)*ONE_4PI_EPS0/forceparams[type0].wpol.al_y; kk[ZZ] = sqr(qS)*ONE_4PI_EPS0/forceparams[type0].wpol.al_z; r_HH = 1.0/forceparams[type0].wpol.rHH; r_OD = 1.0/forceparams[type0].wpol.rOD; if (debug) { fprintf(debug, "WPOL: qS = %10.5f aS = %5d\n", qS, aS); fprintf(debug, "WPOL: kk = %10.3f %10.3f %10.3f\n", kk[XX], kk[YY], kk[ZZ]); fprintf(debug, "WPOL: rOH = %10.3f rHH = %10.3f rOD = %10.3f\n", forceparams[type0].wpol.rOH, forceparams[type0].wpol.rHH, forceparams[type0].wpol.rOD); } for (i = 0; (i < nbonds); i += 6) { type = forceatoms[i]; if (type != type0) { gmx_fatal(FARGS, "Sorry, type = %d, type0 = %d, file = %s, line = %d", type, type0, __FILE__, __LINE__); } aO = forceatoms[i+1]; aH1 = forceatoms[i+2]; aH2 = forceatoms[i+3]; aD = forceatoms[i+4]; aS = forceatoms[i+5]; /* Compute vectors describing the water frame */ rvec_sub(x[aH1], x[aO], dOH1); rvec_sub(x[aH2], x[aO], dOH2); rvec_sub(x[aH2], x[aH1], dHH); rvec_sub(x[aD], x[aO], dOD); rvec_sub(x[aS], x[aD], dDS); cprod(dOH1, dOH2, nW); /* Compute inverse length of normal vector * (this one could be precomputed, but I'm too lazy now) */ r_nW = gmx_invsqrt(iprod(nW, nW)); /* This is for precision, but does not make a big difference, * it can go later. 
*/ r_OD = gmx_invsqrt(iprod(dOD, dOD)); /* Normalize the vectors in the water frame */ svmul(r_nW, nW, nW); svmul(r_HH, dHH, dHH); svmul(r_OD, dOD, dOD); /* Compute displacement of shell along components of the vector */ dx[ZZ] = iprod(dDS, dOD); /* Compute projection on the XY plane: dDS - dx[ZZ]*dOD */ for (m = 0; (m < DIM); m++) { proj[m] = dDS[m]-dx[ZZ]*dOD[m]; } /*dx[XX] = iprod(dDS,nW); dx[YY] = iprod(dDS,dHH);*/ dx[XX] = iprod(proj, nW); for (m = 0; (m < DIM); m++) { proj[m] -= dx[XX]*nW[m]; } dx[YY] = iprod(proj, dHH); /*#define DEBUG*/ #ifdef DEBUG if (debug) { fprintf(debug, "WPOL: dx2=%10g dy2=%10g dz2=%10g sum=%10g dDS^2=%10g\n", sqr(dx[XX]), sqr(dx[YY]), sqr(dx[ZZ]), iprod(dx, dx), iprod(dDS, dDS)); fprintf(debug, "WPOL: dHH=(%10g,%10g,%10g)\n", dHH[XX], dHH[YY], dHH[ZZ]); fprintf(debug, "WPOL: dOD=(%10g,%10g,%10g), 1/r_OD = %10g\n", dOD[XX], dOD[YY], dOD[ZZ], 1/r_OD); fprintf(debug, "WPOL: nW =(%10g,%10g,%10g), 1/r_nW = %10g\n", nW[XX], nW[YY], nW[ZZ], 1/r_nW); fprintf(debug, "WPOL: dx =%10g, dy =%10g, dz =%10g\n", dx[XX], dx[YY], dx[ZZ]); fprintf(debug, "WPOL: dDSx=%10g, dDSy=%10g, dDSz=%10g\n", dDS[XX], dDS[YY], dDS[ZZ]); } #endif /* Now compute the forces and energy */ kdx[XX] = kk[XX]*dx[XX]; kdx[YY] = kk[YY]*dx[YY]; kdx[ZZ] = kk[ZZ]*dx[ZZ]; vtot += iprod(dx, kdx); for (m = 0; (m < DIM); m++) { /* This is a tensor operation but written out for speed */ tx = nW[m]*kdx[XX]; ty = dHH[m]*kdx[YY]; tz = dOD[m]*kdx[ZZ]; fij = -tx-ty-tz; #ifdef DEBUG df[m] = fij; #endif f[aS][m] += fij; f[aD][m] -= fij; } #ifdef DEBUG if (debug) { fprintf(debug, "WPOL: vwpol=%g\n", 0.5*iprod(dx, kdx)); fprintf(debug, "WPOL: df = (%10g, %10g, %10g)\n", df[XX], df[YY], df[ZZ]); } #endif } } return 0.5*vtot; } static real do_1_thole(const rvec xi, const rvec xj, rvec fi, rvec fj, const t_pbc *pbc, real qq, rvec fshift[], real afac) { rvec r12; real r12sq, r12_1, r12n, r12bar, v0, v1, fscal, ebar, fff; int m, t; t = pbc_rvec_sub(pbc, xi, xj, r12); /* 3 */ r12sq = iprod(r12, 
r12); /* 5 */ r12_1 = gmx_invsqrt(r12sq); /* 5 */ r12bar = afac/r12_1; /* 5 */ v0 = qq*ONE_4PI_EPS0*r12_1; /* 2 */ ebar = exp(-r12bar); /* 5 */ v1 = (1-(1+0.5*r12bar)*ebar); /* 4 */ fscal = ((v0*r12_1)*v1 - v0*0.5*afac*ebar*(r12bar+1))*r12_1; /* 9 */ if (debug) { fprintf(debug, "THOLE: v0 = %.3f v1 = %.3f r12= % .3f r12bar = %.3f fscal = %.3f ebar = %.3f\n", v0, v1, 1/r12_1, r12bar, fscal, ebar); } for (m = 0; (m < DIM); m++) { fff = fscal*r12[m]; fi[m] += fff; fj[m] -= fff; fshift[t][m] += fff; fshift[CENTRAL][m] -= fff; } /* 15 */ return v0*v1; /* 1 */ /* 54 */ } real thole_pol(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { /* Interaction between two pairs of particles with opposite charge */ int i, type, a1, da1, a2, da2; real q1, q2, qq, a, al1, al2, afac; real V = 0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; a1 = forceatoms[i++]; da1 = forceatoms[i++]; a2 = forceatoms[i++]; da2 = forceatoms[i++]; q1 = md->chargeA[da1]; q2 = md->chargeA[da2]; a = forceparams[type].thole.a; al1 = forceparams[type].thole.alpha1; al2 = forceparams[type].thole.alpha2; qq = q1*q2; afac = a*pow(al1*al2, -1.0/6.0); V += do_1_thole(x[a1], x[a2], f[a1], f[a2], pbc, qq, fshift, afac); V += do_1_thole(x[da1], x[a2], f[da1], f[a2], pbc, -qq, fshift, afac); V += do_1_thole(x[a1], x[da2], f[a1], f[da2], pbc, -qq, fshift, afac); V += do_1_thole(x[da1], x[da2], f[da1], f[da2], pbc, qq, fshift, afac); } /* 290 flops */ return V; } real bond_angle(const rvec xi, const rvec xj, const rvec xk, const t_pbc *pbc, rvec r_ij, rvec r_kj, real *costh, int *t1, int *t2) /* Return value is the angle between the bonds i-j and j-k */ { /* 41 FLOPS */ real th; *t1 = pbc_rvec_sub(pbc, xi, xj, r_ij); /* 3 */ *t2 = pbc_rvec_sub(pbc, xk, xj, r_kj); /* 3 */ *costh = cos_angle(r_ij, r_kj); /* 25 */ th = 
acos(*costh); /* 10 */ /* 41 TOTAL */ return th; } real angles(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, ai, aj, ak, t1, t2, type; rvec r_ij, r_kj; real cos_theta, cos_theta2, theta, dVdt, va, vtot; ivec jt, dt_ij, dt_kj; vtot = 0.0; for (i = 0; i < nbonds; ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ak = forceatoms[i++]; theta = bond_angle(x[ai], x[aj], x[ak], pbc, r_ij, r_kj, &cos_theta, &t1, &t2); /* 41 */ *dvdlambda += harmonic(forceparams[type].harmonic.krA, forceparams[type].harmonic.krB, forceparams[type].harmonic.rA*DEG2RAD, forceparams[type].harmonic.rB*DEG2RAD, theta, lambda, &va, &dVdt); /* 21 */ vtot += va; cos_theta2 = sqr(cos_theta); if (cos_theta2 < 1) { int m; real st, sth; real cik, cii, ckk; real nrkj2, nrij2; real nrkj_1, nrij_1; rvec f_i, f_j, f_k; st = dVdt*gmx_invsqrt(1 - cos_theta2); /* 12 */ sth = st*cos_theta; /* 1 */ #ifdef DEBUG if (debug) { fprintf(debug, "ANGLES: theta = %10g vth = %10g dV/dtheta = %10g\n", theta*RAD2DEG, va, dVdt); } #endif nrij2 = iprod(r_ij, r_ij); /* 5 */ nrkj2 = iprod(r_kj, r_kj); /* 5 */ nrij_1 = gmx_invsqrt(nrij2); /* 10 */ nrkj_1 = gmx_invsqrt(nrkj2); /* 10 */ cik = st*nrij_1*nrkj_1; /* 2 */ cii = sth*nrij_1*nrij_1; /* 2 */ ckk = sth*nrkj_1*nrkj_1; /* 2 */ for (m = 0; m < DIM; m++) { /* 39 */ f_i[m] = -(cik*r_kj[m] - cii*r_ij[m]); f_k[m] = -(cik*r_ij[m] - ckk*r_kj[m]); f_j[m] = -f_i[m] - f_k[m]; f[ai][m] += f_i[m]; f[aj][m] += f_j[m]; f[ak][m] += f_k[m]; } if (g != NULL) { copy_ivec(SHIFT_IVEC(g, aj), jt); ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij); ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj); t1 = IVEC2IS(dt_ij); t2 = IVEC2IS(dt_kj); } rvec_inc(fshift[t1], f_i); rvec_inc(fshift[CENTRAL], f_j); rvec_inc(fshift[t2], f_k); } /* 161 TOTAL */ } return vtot; } #ifdef SIMD_BONDEDS /* As angles, but 
using SIMD to calculate many angles at once.
 * This routine does not calculate energies and shift forces.
 * (g, lambda, md, fcd and global_atom_index are unused here; they are
 * kept so the signature matches the other bonded kernels.)
 */
static gmx_inline void
angles_noener_simd(int nbonds,
                   const t_iatom forceatoms[], const t_iparams forceparams[],
                   const rvec x[], rvec f[],
                   const t_pbc *pbc, const t_graph *g,
                   real lambda,
                   const t_mdatoms *md, t_fcdata *fcd,
                   int *global_atom_index)
{
#define UNROLL GMX_SIMD_WIDTH_HERE
    const int  nfa1 = 4;
    int        i, iu, s, m;
    int        type, ai[UNROLL], aj[UNROLL], ak[UNROLL];
    real       coeff_array[2*UNROLL+UNROLL], *coeff;
    real       dr_array[2*DIM*UNROLL+UNROLL], *dr;
    real       f_buf_array[6*UNROLL+UNROLL], *f_buf;
    gmx_mm_pr  k_S, theta0_S;
    gmx_mm_pr  rijx_S, rijy_S, rijz_S;
    gmx_mm_pr  rkjx_S, rkjy_S, rkjz_S;
    gmx_mm_pr  one_S;
    gmx_mm_pr  min_one_plus_eps_S;
    gmx_mm_pr  rij_rkj_S;
    gmx_mm_pr  nrij2_S, nrij_1_S;
    gmx_mm_pr  nrkj2_S, nrkj_1_S;
    gmx_mm_pr  cos_S, invsin_S;
    gmx_mm_pr  theta_S;
    gmx_mm_pr  st_S, sth_S;
    gmx_mm_pr  cik_S, cii_S, ckk_S;
    gmx_mm_pr  f_ix_S, f_iy_S, f_iz_S;
    gmx_mm_pr  f_kx_S, f_ky_S, f_kz_S;
    pbc_simd_t pbc_simd;

    /* Ensure register memory alignment */
    coeff = gmx_simd_align_real(coeff_array);
    dr    = gmx_simd_align_real(dr_array);
    f_buf = gmx_simd_align_real(f_buf_array);

    set_pbc_simd(pbc,&pbc_simd);

    one_S = gmx_set1_pr(1.0);

    /* The smallest number > -1 */
    min_one_plus_eps_S = gmx_set1_pr(-1.0 + 2*GMX_REAL_EPS);

    /* nbonds is the number of angles times nfa1, here we step UNROLL angles */
    for (i = 0; (i < nbonds); i += UNROLL*nfa1)
    {
        /* Collect atoms for UNROLL angles.
         * iu indexes into forceatoms, we should not let iu go beyond nbonds.
         */
        iu = i;
        for (s = 0; s < UNROLL; s++)
        {
            type  = forceatoms[iu];
            ai[s] = forceatoms[iu+1];
            aj[s] = forceatoms[iu+2];
            ak[s] = forceatoms[iu+3];

            /* Pack force constant and equilibrium angle (in radians) */
            coeff[s]        = forceparams[type].harmonic.krA;
            coeff[UNROLL+s] = forceparams[type].harmonic.rA*DEG2RAD;

            /* If you can't use pbc_dx_simd below for PBC, e.g. because
             * you can't round in SIMD, use pbc_rvec_sub here.
             */
            /* Store the non PBC corrected distances packed and aligned */
            for (m = 0; m < DIM; m++)
            {
                dr[s +      m *UNROLL] = x[ai[s]][m] - x[aj[s]][m];
                dr[s + (DIM+m)*UNROLL] = x[ak[s]][m] - x[aj[s]][m];
            }

            /* At the end fill the arrays with identical entries */
            if (iu + nfa1 < nbonds)
            {
                iu += nfa1;
            }
        }

        k_S      = gmx_load_pr(coeff);
        theta0_S = gmx_load_pr(coeff+UNROLL);

        rijx_S = gmx_load_pr(dr + 0*UNROLL);
        rijy_S = gmx_load_pr(dr + 1*UNROLL);
        rijz_S = gmx_load_pr(dr + 2*UNROLL);
        rkjx_S = gmx_load_pr(dr + 3*UNROLL);
        rkjy_S = gmx_load_pr(dr + 4*UNROLL);
        rkjz_S = gmx_load_pr(dr + 5*UNROLL);

        /* Apply PBC to both bond vectors */
        pbc_dx_simd(&rijx_S, &rijy_S, &rijz_S, &pbc_simd);
        pbc_dx_simd(&rkjx_S, &rkjy_S, &rkjz_S, &pbc_simd);

        rij_rkj_S = gmx_iprod_pr(rijx_S, rijy_S, rijz_S,
                                 rkjx_S, rkjy_S, rkjz_S);

        nrij2_S = gmx_norm2_pr(rijx_S, rijy_S, rijz_S);
        nrkj2_S = gmx_norm2_pr(rkjx_S, rkjy_S, rkjz_S);

        nrij_1_S = gmx_invsqrt_pr(nrij2_S);
        nrkj_1_S = gmx_invsqrt_pr(nrkj2_S);

        cos_S = gmx_mul_pr(rij_rkj_S, gmx_mul_pr(nrij_1_S, nrkj_1_S));

        /* To allow for 180 degrees, we take the max of cos and -1 + 1bit,
         * so we can safely get the 1/sin from 1/sqrt(1 - cos^2).
         * This also ensures that rounding errors can not cause the argument
         * of gmx_acos_pr to be < -1.
         * Note that we do not take precautions for cos(0)=1, so the outer
         * atoms in an angle should not be on top of each other.
         */
        cos_S = gmx_max_pr(cos_S, min_one_plus_eps_S);

        theta_S = gmx_acos_pr(cos_S);

        invsin_S = gmx_invsqrt_pr(gmx_sub_pr(one_S, gmx_mul_pr(cos_S, cos_S)));

        st_S  = gmx_mul_pr(gmx_mul_pr(k_S, gmx_sub_pr(theta0_S, theta_S)),
                           invsin_S);
        sth_S = gmx_mul_pr(st_S, cos_S);

        cik_S = gmx_mul_pr(st_S,  gmx_mul_pr(nrij_1_S, nrkj_1_S));
        cii_S = gmx_mul_pr(sth_S, gmx_mul_pr(nrij_1_S, nrij_1_S));
        ckk_S = gmx_mul_pr(sth_S, gmx_mul_pr(nrkj_1_S, nrkj_1_S));

        /* f_i = cii*r_ij - cik*r_kj, f_k = ckk*r_kj - cik*r_ij */
        f_ix_S = gmx_mul_pr(cii_S, rijx_S);
        f_ix_S = gmx_nmsub_pr(cik_S, rkjx_S, f_ix_S);
        f_iy_S = gmx_mul_pr(cii_S, rijy_S);
        f_iy_S = gmx_nmsub_pr(cik_S, rkjy_S, f_iy_S);
        f_iz_S = gmx_mul_pr(cii_S, rijz_S);
        f_iz_S = gmx_nmsub_pr(cik_S, rkjz_S, f_iz_S);
        f_kx_S = gmx_mul_pr(ckk_S, rkjx_S);
        f_kx_S = gmx_nmsub_pr(cik_S, rijx_S, f_kx_S);
        f_ky_S = gmx_mul_pr(ckk_S, rkjy_S);
        f_ky_S = gmx_nmsub_pr(cik_S, rijy_S, f_ky_S);
        f_kz_S = gmx_mul_pr(ckk_S, rkjz_S);
        f_kz_S = gmx_nmsub_pr(cik_S, rijz_S, f_kz_S);

        gmx_store_pr(f_buf + 0*UNROLL, f_ix_S);
        gmx_store_pr(f_buf + 1*UNROLL, f_iy_S);
        gmx_store_pr(f_buf + 2*UNROLL, f_iz_S);
        gmx_store_pr(f_buf + 3*UNROLL, f_kx_S);
        gmx_store_pr(f_buf + 4*UNROLL, f_ky_S);
        gmx_store_pr(f_buf + 5*UNROLL, f_kz_S);

        /* Scatter the forces; stop at nbonds so padded duplicate entries
         * are not accumulated twice. */
        iu = i;
        s  = 0;
        do
        {
            for (m = 0; m < DIM; m++)
            {
                f[ai[s]][m] += f_buf[s + m*UNROLL];
                f[aj[s]][m] -= f_buf[s + m*UNROLL] + f_buf[s + (DIM+m)*UNROLL];
                f[ak[s]][m] += f_buf[s + (DIM+m)*UNROLL];
            }
            s++;
            iu += nfa1;
        }
        while (s < UNROLL && iu < nbonds);
    }
#undef UNROLL
}
#endif /* SIMD_BONDEDS */

real linear_angles(int nbonds,
                   const t_iatom forceatoms[], const t_iparams forceparams[],
                   const rvec x[], rvec f[], rvec fshift[],
                   const t_pbc *pbc, const t_graph *g,
                   real lambda, real *dvdlambda,
                   const t_mdatoms *md, t_fcdata *fcd,
                   int *global_atom_index)
{
    int  i, m, ai, aj, ak, t1, t2, type;
    rvec f_i, f_j, f_k;
    real L1, kA, kB, aA, aB, dr, dr2, va, vtot, a, b, klin;
    ivec jt, dt_ij, dt_kj;
    rvec r_ij, r_kj, r_ik, dx;

    L1   = 1-lambda;
    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj =
forceatoms[i++]; ak = forceatoms[i++]; kA = forceparams[type].linangle.klinA; kB = forceparams[type].linangle.klinB; klin = L1*kA + lambda*kB; aA = forceparams[type].linangle.aA; aB = forceparams[type].linangle.aB; a = L1*aA+lambda*aB; b = 1-a; t1 = pbc_rvec_sub(pbc, x[ai], x[aj], r_ij); t2 = pbc_rvec_sub(pbc, x[ak], x[aj], r_kj); rvec_sub(r_ij, r_kj, r_ik); dr2 = 0; for (m = 0; (m < DIM); m++) { dr = -a * r_ij[m] - b * r_kj[m]; dr2 += dr*dr; dx[m] = dr; f_i[m] = a*klin*dr; f_k[m] = b*klin*dr; f_j[m] = -(f_i[m]+f_k[m]); f[ai][m] += f_i[m]; f[aj][m] += f_j[m]; f[ak][m] += f_k[m]; } va = 0.5*klin*dr2; *dvdlambda += 0.5*(kB-kA)*dr2 + klin*(aB-aA)*iprod(dx, r_ik); vtot += va; if (g) { copy_ivec(SHIFT_IVEC(g, aj), jt); ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij); ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj); t1 = IVEC2IS(dt_ij); t2 = IVEC2IS(dt_kj); } rvec_inc(fshift[t1], f_i); rvec_inc(fshift[CENTRAL], f_j); rvec_inc(fshift[t2], f_k); } /* 57 TOTAL */ return vtot; } real urey_bradley(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, m, ai, aj, ak, t1, t2, type, ki; rvec r_ij, r_kj, r_ik; real cos_theta, cos_theta2, theta; real dVdt, va, vtot, dr, dr2, vbond, fbond, fik; real kthA, th0A, kUBA, r13A, kthB, th0B, kUBB, r13B; ivec jt, dt_ij, dt_kj, dt_ik; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ak = forceatoms[i++]; th0A = forceparams[type].u_b.thetaA*DEG2RAD; kthA = forceparams[type].u_b.kthetaA; r13A = forceparams[type].u_b.r13A; kUBA = forceparams[type].u_b.kUBA; th0B = forceparams[type].u_b.thetaB*DEG2RAD; kthB = forceparams[type].u_b.kthetaB; r13B = forceparams[type].u_b.r13B; kUBB = forceparams[type].u_b.kUBB; theta = bond_angle(x[ai], x[aj], x[ak], pbc, r_ij, r_kj, &cos_theta, &t1, &t2); /* 41 */ *dvdlambda += 
harmonic(kthA, kthB, th0A, th0B, theta, lambda, &va, &dVdt); /* 21 */ vtot += va; ki = pbc_rvec_sub(pbc, x[ai], x[ak], r_ik); /* 3 */ dr2 = iprod(r_ik, r_ik); /* 5 */ dr = dr2*gmx_invsqrt(dr2); /* 10 */ *dvdlambda += harmonic(kUBA, kUBB, r13A, r13B, dr, lambda, &vbond, &fbond); /* 19 */ cos_theta2 = sqr(cos_theta); /* 1 */ if (cos_theta2 < 1) { real st, sth; real cik, cii, ckk; real nrkj2, nrij2; rvec f_i, f_j, f_k; st = dVdt*gmx_invsqrt(1 - cos_theta2); /* 12 */ sth = st*cos_theta; /* 1 */ #ifdef DEBUG if (debug) { fprintf(debug, "ANGLES: theta = %10g vth = %10g dV/dtheta = %10g\n", theta*RAD2DEG, va, dVdt); } #endif nrkj2 = iprod(r_kj, r_kj); /* 5 */ nrij2 = iprod(r_ij, r_ij); cik = st*gmx_invsqrt(nrkj2*nrij2); /* 12 */ cii = sth/nrij2; /* 10 */ ckk = sth/nrkj2; /* 10 */ for (m = 0; (m < DIM); m++) /* 39 */ { f_i[m] = -(cik*r_kj[m]-cii*r_ij[m]); f_k[m] = -(cik*r_ij[m]-ckk*r_kj[m]); f_j[m] = -f_i[m]-f_k[m]; f[ai][m] += f_i[m]; f[aj][m] += f_j[m]; f[ak][m] += f_k[m]; } if (g) { copy_ivec(SHIFT_IVEC(g, aj), jt); ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij); ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj); t1 = IVEC2IS(dt_ij); t2 = IVEC2IS(dt_kj); } rvec_inc(fshift[t1], f_i); rvec_inc(fshift[CENTRAL], f_j); rvec_inc(fshift[t2], f_k); } /* 161 TOTAL */ /* Time for the bond calculations */ if (dr2 == 0.0) { continue; } vtot += vbond; /* 1*/ fbond *= gmx_invsqrt(dr2); /* 6 */ if (g) { ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ak), dt_ik); ki = IVEC2IS(dt_ik); } for (m = 0; (m < DIM); m++) /* 15 */ { fik = fbond*r_ik[m]; f[ai][m] += fik; f[ak][m] -= fik; fshift[ki][m] += fik; fshift[CENTRAL][m] -= fik; } } return vtot; } real quartic_angles(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, j, ai, aj, ak, t1, t2, type; rvec r_ij, r_kj; real cos_theta, cos_theta2, theta, dt, dVdt, va, dtp, c, 
vtot;
    ivec jt, dt_ij, dt_kj;

    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];

        theta = bond_angle(x[ai], x[aj], x[ak], pbc,
                           r_ij, r_kj, &cos_theta, &t1, &t2); /* 41 */

        dt = theta - forceparams[type].qangle.theta*DEG2RAD;  /*  2 */

        /* Accumulate the polynomial value and -dV/dtheta term by term */
        dVdt = 0;
        va   = forceparams[type].qangle.c[0];
        dtp  = 1.0;
        for (j = 1; j <= 4; j++)
        {
            c     = forceparams[type].qangle.c[j];
            dVdt -= j*c*dtp;
            dtp  *= dt;
            va   += c*dtp;
        }
        /* 20 */

        vtot += va;

        cos_theta2 = sqr(cos_theta); /* 1 */
        if (cos_theta2 < 1)
        {
            int  m;
            real st, sth;
            real cik, cii, ckk;
            real nrkj2, nrij2;
            rvec f_i, f_j, f_k;

            st  = dVdt*gmx_invsqrt(1 - cos_theta2); /* 12 */
            sth = st*cos_theta;                     /*  1 */
#ifdef DEBUG
            if (debug)
            {
                fprintf(debug, "ANGLES: theta = %10g  vth = %10g  dV/dtheta = %10g\n",
                        theta*RAD2DEG, va, dVdt);
            }
#endif
            nrkj2 = iprod(r_kj, r_kj); /* 5 */
            nrij2 = iprod(r_ij, r_ij);

            cik = st*gmx_invsqrt(nrkj2*nrij2); /* 12 */
            cii = sth/nrij2;                   /* 10 */
            ckk = sth/nrkj2;                   /* 10 */

            for (m = 0; (m < DIM); m++)        /* 39 */
            {
                f_i[m]    = -(cik*r_kj[m]-cii*r_ij[m]);
                f_k[m]    = -(cik*r_ij[m]-ckk*r_kj[m]);
                f_j[m]    = -f_i[m]-f_k[m];
                f[ai][m] += f_i[m];
                f[aj][m] += f_j[m];
                f[ak][m] += f_k[m];
            }
            if (g)
            {
                copy_ivec(SHIFT_IVEC(g, aj), jt);

                ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij);
                ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj);
                t1 = IVEC2IS(dt_ij);
                t2 = IVEC2IS(dt_kj);
            }
            rvec_inc(fshift[t1], f_i);
            rvec_inc(fshift[CENTRAL], f_j);
            rvec_inc(fshift[t2], f_k);
        }   /* 153 TOTAL */
    }
    return vtot;
}

/* Compute the signed dihedral angle i-j-k-l (in radians).  Also returns
 * the PBC-corrected bond vectors r_ij, r_kj, r_kl, the plane normals
 * m = r_ij x r_kj and n = r_kj x r_kl, the sign of the angle and the
 * three shift indices, all needed later by do_dih_fup().
 */
real dih_angle(const rvec xi, const rvec xj, const rvec xk, const rvec xl,
               const t_pbc *pbc,
               rvec r_ij, rvec r_kj, rvec r_kl, rvec m, rvec n,
               real *sign, int *t1, int *t2, int *t3)
{
    real ipr, phi;

    *t1 = pbc_rvec_sub(pbc, xi, xj, r_ij); /* 3 */
    *t2 = pbc_rvec_sub(pbc, xk, xj, r_kj); /* 3 */
    *t3 = pbc_rvec_sub(pbc, xk, xl, r_kl); /* 3 */

    cprod(r_ij, r_kj, m);        /*  9 */
    cprod(r_kj, r_kl, n);        /*  9 */
    phi     = gmx_angle(m, n);   /* 49 (assuming 25 for atan2) */
    ipr     = iprod(r_ij, n);    /*  5 */
    (*sign) = (ipr < 0.0) ? -1.0 : 1.0;
    phi     = (*sign)*phi;       /*  1 */
    /* 82 TOTAL */
    return phi;
}

#ifdef SIMD_BONDEDS

/* As dih_angle above, but calculates 4 dihedral angles at once using SIMD,
 * also calculates the pre-factor required for the dihedral force update.
 * Note that bv and buf should be register aligned.
 */
static gmx_inline void
dih_angle_simd(const rvec *x,
               const int *ai, const int *aj, const int *ak, const int *al,
               const pbc_simd_t *pbc,
               real *dr,
               gmx_mm_pr *phi_S,
               gmx_mm_pr *mx_S, gmx_mm_pr *my_S, gmx_mm_pr *mz_S,
               gmx_mm_pr *nx_S, gmx_mm_pr *ny_S, gmx_mm_pr *nz_S,
               gmx_mm_pr *nrkj_m2_S,
               gmx_mm_pr *nrkj_n2_S,
               real *p,
               real *q)
{
#define UNROLL GMX_SIMD_WIDTH_HERE
    int       s, m;
    gmx_mm_pr rijx_S, rijy_S, rijz_S;
    gmx_mm_pr rkjx_S, rkjy_S, rkjz_S;
    gmx_mm_pr rklx_S, rkly_S, rklz_S;
    gmx_mm_pr cx_S, cy_S, cz_S;
    gmx_mm_pr cn_S;
    gmx_mm_pr s_S;
    gmx_mm_pr ipr_S;
    gmx_mm_pr iprm_S, iprn_S;
    gmx_mm_pr nrkj2_S, nrkj_1_S, nrkj_2_S, nrkj_S;
    gmx_mm_pr toler_S;
    gmx_mm_pr p_S, q_S;
    gmx_mm_pr nrkj2_min_S;
    gmx_mm_pr real_eps_S;

    /* Used to avoid division by zero.
     * We take into account that we multiply the result by real_eps_S.
     */
    nrkj2_min_S = gmx_set1_pr(GMX_REAL_MIN/(2*GMX_REAL_EPS));

    /* The value of the last significant bit (GMX_REAL_EPS is half of that) */
    real_eps_S  = gmx_set1_pr(2*GMX_REAL_EPS);

    for (s = 0; s < UNROLL; s++)
    {
        /* If you can't use pbc_dx_simd below for PBC, e.g. because
         * you can't round in SIMD, use pbc_rvec_sub here.
*/
        /* Store the bond vectors of UNROLL dihedrals interleaved per lane */
        for (m = 0; m < DIM; m++)
        {
            dr[s + (0*DIM + m)*UNROLL] = x[ai[s]][m] - x[aj[s]][m];
            dr[s + (1*DIM + m)*UNROLL] = x[ak[s]][m] - x[aj[s]][m];
            dr[s + (2*DIM + m)*UNROLL] = x[ak[s]][m] - x[al[s]][m];
        }
    }

    rijx_S = gmx_load_pr(dr + 0*UNROLL);
    rijy_S = gmx_load_pr(dr + 1*UNROLL);
    rijz_S = gmx_load_pr(dr + 2*UNROLL);
    rkjx_S = gmx_load_pr(dr + 3*UNROLL);
    rkjy_S = gmx_load_pr(dr + 4*UNROLL);
    rkjz_S = gmx_load_pr(dr + 5*UNROLL);
    rklx_S = gmx_load_pr(dr + 6*UNROLL);
    rkly_S = gmx_load_pr(dr + 7*UNROLL);
    rklz_S = gmx_load_pr(dr + 8*UNROLL);

    pbc_dx_simd(&rijx_S, &rijy_S, &rijz_S, pbc);
    pbc_dx_simd(&rkjx_S, &rkjy_S, &rkjz_S, pbc);
    pbc_dx_simd(&rklx_S, &rkly_S, &rklz_S, pbc);

    gmx_cprod_pr(rijx_S, rijy_S, rijz_S,
                 rkjx_S, rkjy_S, rkjz_S,
                 mx_S, my_S, mz_S);

    gmx_cprod_pr(rkjx_S, rkjy_S, rkjz_S,
                 rklx_S, rkly_S, rklz_S,
                 nx_S, ny_S, nz_S);

    gmx_cprod_pr(*mx_S, *my_S, *mz_S,
                 *nx_S, *ny_S, *nz_S,
                 &cx_S, &cy_S, &cz_S);

    cn_S = gmx_sqrt_pr(gmx_norm2_pr(cx_S, cy_S, cz_S));

    s_S  = gmx_iprod_pr(*mx_S, *my_S, *mz_S, *nx_S, *ny_S, *nz_S);

    /* Determine the dihedral angle, the sign might need correction */
    *phi_S = gmx_atan2_pr(cn_S, s_S);

    ipr_S  = gmx_iprod_pr(rijx_S, rijy_S, rijz_S,
                          *nx_S, *ny_S, *nz_S);

    iprm_S = gmx_norm2_pr(*mx_S, *my_S, *mz_S);
    iprn_S = gmx_norm2_pr(*nx_S, *ny_S, *nz_S);

    nrkj2_S = gmx_norm2_pr(rkjx_S, rkjy_S, rkjz_S);

    /* Avoid division by zero. When zero, the result is multiplied by 0
     * anyhow, so the 3 max below do not affect the final result.
     */
    nrkj2_S  = gmx_max_pr(nrkj2_S, nrkj2_min_S);
    nrkj_1_S = gmx_invsqrt_pr(nrkj2_S);
    nrkj_2_S = gmx_mul_pr(nrkj_1_S, nrkj_1_S);
    nrkj_S   = gmx_mul_pr(nrkj2_S, nrkj_1_S);

    toler_S  = gmx_mul_pr(nrkj2_S, real_eps_S);

    /* Here the plain-C code uses a conditional, but we can't do that in SIMD.
     * So we take a max with the tolerance instead. Since we multiply with
     * m or n later, the max does not affect the results.
     */
    iprm_S     = gmx_max_pr(iprm_S, toler_S);
    iprn_S     = gmx_max_pr(iprn_S, toler_S);
    *nrkj_m2_S = gmx_mul_pr(nrkj_S, gmx_inv_pr(iprm_S));
    *nrkj_n2_S = gmx_mul_pr(nrkj_S, gmx_inv_pr(iprn_S));

    /* Set sign of phi_S with the sign of ipr_S; phi_S is currently positive */
    *phi_S = gmx_cpsgn_nonneg_pr(ipr_S, *phi_S);

    p_S = gmx_iprod_pr(rijx_S, rijy_S, rijz_S,
                       rkjx_S, rkjy_S, rkjz_S);
    p_S = gmx_mul_pr(p_S, nrkj_2_S);

    q_S = gmx_iprod_pr(rklx_S, rkly_S, rklz_S,
                       rkjx_S, rkjy_S, rkjz_S);
    q_S = gmx_mul_pr(q_S, nrkj_2_S);

    gmx_store_pr(p, p_S);
    gmx_store_pr(q, q_S);
#undef UNROLL
}

#endif /* SIMD_BONDEDS */

/* Distribute the dihedral force ddphi = -dV/dphi over the four atoms
 * i,j,k,l, using the vectors and normals computed by dih_angle(),
 * and accumulate the corresponding shift (virial) forces.
 */
void do_dih_fup(int i, int j, int k, int l, real ddphi,
                rvec r_ij, rvec r_kj, rvec r_kl,
                rvec m, rvec n, rvec f[], rvec fshift[],
                const t_pbc *pbc, const t_graph *g,
                const rvec x[], int t1, int t2, int t3)
{
    /* 143 FLOPS */
    rvec f_i, f_j, f_k, f_l;
    rvec uvec, vvec, svec, dx_jl;
    real iprm, iprn, nrkj, nrkj2, nrkj_1, nrkj_2;
    real a, b, p, q, toler;
    ivec jt, dt_ij, dt_kj, dt_lj;

    iprm  = iprod(m, m);       /* 5 */
    iprn  = iprod(n, n);       /* 5 */
    nrkj2 = iprod(r_kj, r_kj); /* 5 */
    /* Skip the update when either normal is (numerically) zero,
     * i.e. the dihedral is degenerate.
     */
    toler = nrkj2*GMX_REAL_EPS;
    if ((iprm > toler) && (iprn > toler))
    {
        nrkj_1 = gmx_invsqrt(nrkj2); /* 10 */
        nrkj_2 = nrkj_1*nrkj_1;      /*  1 */
        nrkj   = nrkj2*nrkj_1;       /*  1 */
        a      = -ddphi*nrkj/iprm;   /* 11 */
        svmul(a, m, f_i);            /*  3 */
        b      = ddphi*nrkj/iprn;    /* 11 */
        svmul(b, n, f_l);            /*  3 */
        p      = iprod(r_ij, r_kj);  /*  5 */
        p     *= nrkj_2;             /*  1 */
        q      = iprod(r_kl, r_kj);  /*  5 */
        q     *= nrkj_2;             /*  1 */
        svmul(p, f_i, uvec);         /*  3 */
        svmul(q, f_l, vvec);         /*  3 */
        rvec_sub(uvec, vvec, svec);  /*  3 */
        rvec_sub(f_i, svec, f_j);    /*  3 */
        rvec_add(f_l, svec, f_k);    /*  3 */
        rvec_inc(f[i], f_i);         /*  3 */
        rvec_dec(f[j], f_j);         /*  3 */
        rvec_dec(f[k], f_k);         /*  3 */
        rvec_inc(f[l], f_l);         /*  3 */

        if (g)
        {
            copy_ivec(SHIFT_IVEC(g, j), jt);
            ivec_sub(SHIFT_IVEC(g, i), jt, dt_ij);
            ivec_sub(SHIFT_IVEC(g, k), jt, dt_kj);
            ivec_sub(SHIFT_IVEC(g, l), jt, dt_lj);
            t1 = IVEC2IS(dt_ij);
            t2 = IVEC2IS(dt_kj);
            t3 = IVEC2IS(dt_lj);
        }
        else if (pbc)
        {
            t3 = pbc_rvec_sub(pbc, x[l],
x[j], dx_jl);
        }
        else
        {
            t3 = CENTRAL;
        }

        rvec_inc(fshift[t1], f_i);
        rvec_dec(fshift[CENTRAL], f_j);
        rvec_dec(fshift[t2], f_k);
        rvec_inc(fshift[t3], f_l);
    }
    /* 112 TOTAL */
}

/* As do_dih_fup above, but without shift forces */
static void
do_dih_fup_noshiftf(int i, int j, int k, int l, real ddphi,
                    rvec r_ij, rvec r_kj, rvec r_kl,
                    rvec m, rvec n, rvec f[])
{
    rvec f_i, f_j, f_k, f_l;
    rvec uvec, vvec, svec, dx_jl;
    real iprm, iprn, nrkj, nrkj2, nrkj_1, nrkj_2;
    real a, b, p, q, toler;
    ivec jt, dt_ij, dt_kj, dt_lj;

    iprm  = iprod(m, m);       /* 5 */
    iprn  = iprod(n, n);       /* 5 */
    nrkj2 = iprod(r_kj, r_kj); /* 5 */
    toler = nrkj2*GMX_REAL_EPS;
    if ((iprm > toler) && (iprn > toler))
    {
        nrkj_1 = gmx_invsqrt(nrkj2); /* 10 */
        nrkj_2 = nrkj_1*nrkj_1;      /*  1 */
        nrkj   = nrkj2*nrkj_1;       /*  1 */
        a      = -ddphi*nrkj/iprm;   /* 11 */
        svmul(a, m, f_i);            /*  3 */
        b      = ddphi*nrkj/iprn;    /* 11 */
        svmul(b, n, f_l);            /*  3 */
        p      = iprod(r_ij, r_kj);  /*  5 */
        p     *= nrkj_2;             /*  1 */
        q      = iprod(r_kl, r_kj);  /*  5 */
        q     *= nrkj_2;             /*  1 */
        svmul(p, f_i, uvec);         /*  3 */
        svmul(q, f_l, vvec);         /*  3 */
        rvec_sub(uvec, vvec, svec);  /*  3 */
        rvec_sub(f_i, svec, f_j);    /*  3 */
        rvec_add(f_l, svec, f_k);    /*  3 */
        rvec_inc(f[i], f_i);         /*  3 */
        rvec_dec(f[j], f_j);         /*  3 */
        rvec_dec(f[k], f_k);         /*  3 */
        rvec_inc(f[l], f_l);         /*  3 */
    }
}

/* As do_dih_fup_noshiftf above, but with pre-calculated pre-factors:
 * p, q and the i/l forces come straight from the SIMD dihedral kernel.
 */
static gmx_inline void
do_dih_fup_noshiftf_precalc(int i, int j, int k, int l,
                            real p, real q,
                            real f_i_x, real f_i_y, real f_i_z,
                            real mf_l_x, real mf_l_y, real mf_l_z,
                            rvec f[])
{
    rvec f_i, f_j, f_k, f_l;
    rvec uvec, vvec, svec;

    f_i[XX] = f_i_x;
    f_i[YY] = f_i_y;
    f_i[ZZ] = f_i_z;
    /* mf_l_* hold minus the force on l */
    f_l[XX] = -mf_l_x;
    f_l[YY] = -mf_l_y;
    f_l[ZZ] = -mf_l_z;
    svmul(p, f_i, uvec);
    svmul(q, f_l, vvec);
    rvec_sub(uvec, vvec, svec);
    rvec_sub(f_i, svec, f_j);
    rvec_add(f_l, svec, f_k);
    rvec_inc(f[i], f_i);
    rvec_dec(f[j], f_j);
    rvec_dec(f[k], f_k);
    rvec_inc(f[l], f_l);
}

/* Proper dihedral term V = cp*(1 + cos(mult*phi - phi0)) with A/B state
 * interpolation.  Sets *V to the energy and *F to -dV/dphi, returns
 * dV/dlambda.
 */
real dopdihs(real cpA, real cpB, real phiA, real phiB, int mult,
             real phi, real lambda, real *V, real *F)
{
    real v, dvdlambda, mdphi, v1, sdphi, ddphi;
    real L1   = 1.0 - lambda;
    real ph0  = (L1*phiA + lambda*phiB)*DEG2RAD;
    real dph0 = (phiB - phiA)*DEG2RAD;
    real cp   = L1*cpA + lambda*cpB;

    mdphi = mult*phi - ph0;
    sdphi = sin(mdphi);
    ddphi = -cp*mult*sdphi;
    v1    = 1.0 + cos(mdphi);
    v     = cp*v1;

    dvdlambda = (cpB - cpA)*v1 + cp*dph0*sdphi;

    *V = v;
    *F = ddphi;

    return dvdlambda;

    /* That was 40 flops */
}

/* As dopdihs, but computes only the force prefactor *F = -dV/dphi. */
static void
dopdihs_noener(real cpA, real cpB, real phiA, real phiB, int mult,
               real phi, real lambda, real *F)
{
    real mdphi, sdphi, ddphi;
    real L1  = 1.0 - lambda;
    real ph0 = (L1*phiA + lambda*phiB)*DEG2RAD;
    real cp  = L1*cpA + lambda*cpB;

    mdphi = mult*phi - ph0;
    sdphi = sin(mdphi);
    ddphi = -cp*mult*sdphi;

    *F = ddphi;

    /* That was 20 flops */
}

/* Returns only the interpolated force constant *cp and the phase-shifted
 * angle *mdphi = mult*phi - phi0 for a proper dihedral.
 */
static void
dopdihs_mdphi(real cpA, real cpB, real phiA, real phiB, int mult,
              real phi, real lambda, real *cp, real *mdphi)
{
    real L1  = 1.0 - lambda;
    real ph0 = (L1*phiA + lambda*phiB)*DEG2RAD;

    *cp    = L1*cpA + lambda*cpB;

    *mdphi = mult*phi - ph0;
}

static real dopdihs_min(real cpA, real cpB, real phiA, real phiB, int mult,
                        real phi, real lambda, real *V, real *F)
/* similar to dopdihs, except for a minus sign  *
 * and a different treatment of mult/phi0       */
{
    real v, dvdlambda, mdphi, v1, sdphi, ddphi;
    real L1   = 1.0 - lambda;
    real ph0  = (L1*phiA + lambda*phiB)*DEG2RAD;
    real dph0 = (phiB - phiA)*DEG2RAD;
    real cp   = L1*cpA + lambda*cpB;

    mdphi = mult*(phi-ph0);
    sdphi = sin(mdphi);
    ddphi = cp*mult*sdphi;
    v1    = 1.0-cos(mdphi);
    v     = cp*v1;

    dvdlambda = (cpB-cpA)*v1 + cp*dph0*sdphi;

    *V = v;
    *F = ddphi;

    return dvdlambda;

    /* That was 40 flops */
}

/* Proper dihedrals: loops over all dihedrals, computes angle, energy
 * and dV/dlambda via dopdihs() and spreads the forces with do_dih_fup().
 * Returns the total energy.
 */
real pdihs(int nbonds,
           const t_iatom forceatoms[], const t_iparams forceparams[],
           const rvec x[], rvec f[], rvec fshift[],
           const t_pbc *pbc, const t_graph *g,
           real lambda, real *dvdlambda,
           const t_mdatoms *md, t_fcdata *fcd,
           int *global_atom_index)
{
    int  i, type, ai, aj, ak, al;
    int  t1, t2, t3;
    rvec r_ij, r_kj, r_kl, m, n;
    real phi, sign, ddphi, vpd, vtot;

    vtot = 0.0;

    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   =
forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];
        al   = forceatoms[i++];

        phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
                        &sign, &t1, &t2, &t3);  /* 84 */
        *dvdlambda += dopdihs(forceparams[type].pdihs.cpA,
                              forceparams[type].pdihs.cpB,
                              forceparams[type].pdihs.phiA,
                              forceparams[type].pdihs.phiB,
                              forceparams[type].pdihs.mult,
                              phi, lambda, &vpd, &ddphi);

        vtot += vpd;
        do_dih_fup(ai, aj, ak, al, ddphi, r_ij, r_kj, r_kl, m, n,
                   f, fshift, pbc, g, x, t1, t2, t3); /* 112 */

#ifdef DEBUG
        fprintf(debug, "pdih: (%d,%d,%d,%d) phi=%g\n",
                ai, aj, ak, al, phi);
#endif
    }   /* 223 TOTAL */

    return vtot;
}

/* Fold *dp back into the interval (-pi,pi); assumes the input is at most
 * 2*pi outside that range (one correction suffices).
 */
void make_dp_periodic(real *dp)  /* 1 flop? */
{
    /* dp cannot be outside (-pi,pi) */
    if (*dp >= M_PI)
    {
        *dp -= 2*M_PI;
    }
    else if (*dp < -M_PI)
    {
        *dp += 2*M_PI;
    }
    return;
}

/* As pdihs above, but without calculating energies and shift forces */
static void
pdihs_noener(int nbonds,
             const t_iatom forceatoms[], const t_iparams forceparams[],
             const rvec x[], rvec f[],
             const t_pbc *pbc, const t_graph *g,
             real lambda,
             const t_mdatoms *md, t_fcdata *fcd,
             int *global_atom_index)
{
    int  i, type, ai, aj, ak, al;
    int  t1, t2, t3;
    rvec r_ij, r_kj, r_kl, m, n;
    real phi, sign, ddphi_tot, ddphi;

    for (i = 0; (i < nbonds); )
    {
        ai = forceatoms[i+1];
        aj = forceatoms[i+2];
        ak = forceatoms[i+3];
        al = forceatoms[i+4];

        phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
                        &sign, &t1, &t2, &t3);

        ddphi_tot = 0;

        /* Loop over dihedrals working on the same atoms,
         * so we avoid recalculating angles and force distributions.
         */
        do
        {
            type = forceatoms[i];
            dopdihs_noener(forceparams[type].pdihs.cpA,
                           forceparams[type].pdihs.cpB,
                           forceparams[type].pdihs.phiA,
                           forceparams[type].pdihs.phiB,
                           forceparams[type].pdihs.mult,
                           phi, lambda, &ddphi);
            ddphi_tot += ddphi;

            i += 5;
        }
        while (i < nbonds &&
               forceatoms[i+1] == ai &&
               forceatoms[i+2] == aj &&
               forceatoms[i+3] == ak &&
               forceatoms[i+4] == al);

        do_dih_fup_noshiftf(ai, aj, ak, al, ddphi_tot, r_ij, r_kj, r_kl, m, n, f);
    }
}

#ifdef SIMD_BONDEDS

/* As pdihs_noner above, but using SIMD to calculate many dihedrals at once */
static void
pdihs_noener_simd(int nbonds,
                  const t_iatom forceatoms[], const t_iparams forceparams[],
                  const rvec x[], rvec f[],
                  const t_pbc *pbc, const t_graph *g,
                  real lambda,
                  const t_mdatoms *md, t_fcdata *fcd,
                  int *global_atom_index)
{
#define UNROLL GMX_SIMD_WIDTH_HERE
    const int  nfa1 = 5;
    int        i, iu, s;
    int        type, ai[UNROLL], aj[UNROLL], ak[UNROLL], al[UNROLL];
    int        t1[UNROLL], t2[UNROLL], t3[UNROLL];
    real       ddphi;
    real       dr_array[3*DIM*UNROLL+UNROLL], *dr;
    real       buf_array[7*UNROLL+UNROLL], *buf;
    real      *cp, *phi0, *mult, *phi, *p, *q, *sf_i, *msf_l;
    gmx_mm_pr  phi0_S, phi_S;
    gmx_mm_pr  mx_S, my_S, mz_S;
    gmx_mm_pr  nx_S, ny_S, nz_S;
    gmx_mm_pr  nrkj_m2_S, nrkj_n2_S;
    gmx_mm_pr  cp_S, mdphi_S, mult_S;
    gmx_mm_pr  sin_S, cos_S;
    gmx_mm_pr  mddphi_S;
    gmx_mm_pr  sf_i_S, msf_l_S;
    pbc_simd_t pbc_simd;

    /* Ensure SIMD register alignment */
    dr  = gmx_simd_align_real(dr_array);
    buf = gmx_simd_align_real(buf_array);

    /* Extract aligned pointer for parameters and variables */
    cp    = buf + 0*UNROLL;
    phi0  = buf + 1*UNROLL;
    mult  = buf + 2*UNROLL;
    p     = buf + 3*UNROLL;
    q     = buf + 4*UNROLL;
    sf_i  = buf + 5*UNROLL;
    msf_l = buf + 6*UNROLL;

    set_pbc_simd(pbc, &pbc_simd);

    /* nbonds is the number of dihedrals times nfa1, here we step UNROLL dihs */
    for (i = 0; (i < nbonds); i += UNROLL*nfa1)
    {
        /* Collect atoms quadruplets for UNROLL dihedrals.
         * iu indexes into forceatoms, we should not let iu go beyond nbonds.
*/
        iu = i;
        for (s = 0; s < UNROLL; s++)
        {
            type  = forceatoms[iu];
            ai[s] = forceatoms[iu+1];
            aj[s] = forceatoms[iu+2];
            ak[s] = forceatoms[iu+3];
            al[s] = forceatoms[iu+4];

            cp[s]   = forceparams[type].pdihs.cpA;
            phi0[s] = forceparams[type].pdihs.phiA*DEG2RAD;
            mult[s] = forceparams[type].pdihs.mult;

            /* At the end fill the arrays with identical entries */
            if (iu + nfa1 < nbonds)
            {
                iu += nfa1;
            }
        }

        /* Calculate UNROLL dihedral angles at once */
        dih_angle_simd(x, ai, aj, ak, al, &pbc_simd,
                       dr,
                       &phi_S,
                       &mx_S, &my_S, &mz_S,
                       &nx_S, &ny_S, &nz_S,
                       &nrkj_m2_S,
                       &nrkj_n2_S,
                       p, q);

        cp_S   = gmx_load_pr(cp);
        phi0_S = gmx_load_pr(phi0);
        mult_S = gmx_load_pr(mult);

        mdphi_S = gmx_sub_pr(gmx_mul_pr(mult_S, phi_S), phi0_S);

        /* Calculate UNROLL sines at once */
        gmx_sincos_pr(mdphi_S, &sin_S, &cos_S);
        mddphi_S = gmx_mul_pr(gmx_mul_pr(cp_S, mult_S), sin_S);
        sf_i_S   = gmx_mul_pr(mddphi_S, nrkj_m2_S);
        msf_l_S  = gmx_mul_pr(mddphi_S, nrkj_n2_S);

        /* After this m?_S will contain f[i] */
        mx_S = gmx_mul_pr(sf_i_S, mx_S);
        my_S = gmx_mul_pr(sf_i_S, my_S);
        mz_S = gmx_mul_pr(sf_i_S, mz_S);

        /* After this m?_S will contain -f[l] */
        nx_S = gmx_mul_pr(msf_l_S, nx_S);
        ny_S = gmx_mul_pr(msf_l_S, ny_S);
        nz_S = gmx_mul_pr(msf_l_S, nz_S);

        gmx_store_pr(dr + 0*UNROLL, mx_S);
        gmx_store_pr(dr + 1*UNROLL, my_S);
        gmx_store_pr(dr + 2*UNROLL, mz_S);
        gmx_store_pr(dr + 3*UNROLL, nx_S);
        gmx_store_pr(dr + 4*UNROLL, ny_S);
        gmx_store_pr(dr + 5*UNROLL, nz_S);

        iu = i;
        s  = 0;
        do
        {
            do_dih_fup_noshiftf_precalc(ai[s], aj[s], ak[s], al[s],
                                        p[s], q[s],
                                        dr[     XX *UNROLL+s],
                                        dr[     YY *UNROLL+s],
                                        dr[     ZZ *UNROLL+s],
                                        dr[(DIM+XX)*UNROLL+s],
                                        dr[(DIM+YY)*UNROLL+s],
                                        dr[(DIM+ZZ)*UNROLL+s],
                                        f);
            s++;
            iu += nfa1;
        }
        while (s < UNROLL && iu < nbonds);
    }
#undef UNROLL
}

#endif /* SIMD_BONDEDS */

/* Improper (harmonic) dihedrals: V = 0.5*kk*dp^2 with dp = phi-phi0
 * folded into (-pi,pi).  Parameters are lambda-interpolated between
 * the A and B states.  Returns the total energy.
 */
real idihs(int nbonds,
           const t_iatom forceatoms[], const t_iparams forceparams[],
           const rvec x[], rvec f[], rvec fshift[],
           const t_pbc *pbc, const t_graph *g,
           real lambda, real *dvdlambda,
           const t_mdatoms *md, t_fcdata *fcd,
           int *global_atom_index)
{
    int  i, type, ai, aj, ak, al;
    int  t1, t2, t3;
    real phi, phi0, dphi0, ddphi, sign, vtot;
    rvec r_ij, r_kj, r_kl, m, n;
    real L1, kk, dp, dp2, kA, kB, pA, pB, dvdl_term;

    L1        = 1.0-lambda;
    dvdl_term = 0;
    vtot      = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];
        al   = forceatoms[i++];

        phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
                        &sign, &t1, &t2, &t3);  /* 84 */

        /* phi can jump if phi0 is close to Pi/-Pi, which will cause huge
         * force changes if we just apply a normal harmonic.
         * Instead, we first calculate phi-phi0 and take it modulo (-Pi,Pi).
         * This means we will never have the periodicity problem, unless
         * the dihedral is Pi away from phiO, which is very unlikely due to
         * the potential.
         */
        kA = forceparams[type].harmonic.krA;
        kB = forceparams[type].harmonic.krB;
        pA = forceparams[type].harmonic.rA;
        pB = forceparams[type].harmonic.rB;

        kk    = L1*kA + lambda*kB;
        phi0  = (L1*pA + lambda*pB)*DEG2RAD;
        dphi0 = (pB - pA)*DEG2RAD;

        dp = phi-phi0;

        make_dp_periodic(&dp);

        dp2 = dp*dp;

        vtot += 0.5*kk*dp2;
        ddphi = -kk*dp;

        dvdl_term += 0.5*(kB - kA)*dp2 - kk*dphi0*dp;

        do_dih_fup(ai, aj, ak, al, (real)(-ddphi), r_ij, r_kj, r_kl, m, n,
                   f, fshift, pbc, g, x, t1, t2, t3); /* 112 */
        /* 218 TOTAL */
#ifdef DEBUG
        if (debug)
        {
            fprintf(debug, "idih: (%d,%d,%d,%d) phi=%g\n",
                    ai, aj, ak, al, phi);
        }
#endif
    }

    *dvdlambda += dvdl_term;
    return vtot;
}

/* Harmonic position restraints with optional reference-coordinate scaling
 * (none, all box dimensions, or center-of-mass based).  Accumulates forces
 * and the diagonal virial; returns the restraint energy.
 */
real posres(int nbonds,
            const t_iatom forceatoms[], const t_iparams forceparams[],
            const rvec x[], rvec f[], rvec vir_diag,
            t_pbc *pbc,
            real lambda, real *dvdlambda,
            int refcoord_scaling, int ePBC, rvec comA, rvec comB)
{
    int              i, ai, m, d, type, ki, npbcdim = 0;
    const t_iparams *pr;
    real             L1;
    real             vtot, kk, fm;
    real             posA, posB, ref = 0;
    rvec             comA_sc, comB_sc, rdist, dpdl, pos, dx;
    gmx_bool         bForceValid = TRUE;

    if ((f == NULL) || (vir_diag == NULL))  /* should both be null together!
*/
    {
        bForceValid = FALSE;
    }

    npbcdim = ePBC2npbcdim(ePBC);

    if (refcoord_scaling == erscCOM)
    {
        clear_rvec(comA_sc);
        clear_rvec(comB_sc);
        /* Transform the COM reference from box-relative to Cartesian
         * coordinates using the (possibly triclinic) box.
         */
        for (m = 0; m < npbcdim; m++)
        {
            for (d = m; d < npbcdim; d++)
            {
                comA_sc[m] += comA[d]*pbc->box[d][m];
                comB_sc[m] += comB[d]*pbc->box[d][m];
            }
        }
    }

    L1 = 1.0 - lambda;

    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        pr   = &forceparams[type];

        for (m = 0; m < DIM; m++)
        {
            posA = forceparams[type].posres.pos0A[m];
            posB = forceparams[type].posres.pos0B[m];
            if (m < npbcdim)
            {
                switch (refcoord_scaling)
                {
                    case erscNO:
                        ref      = 0;
                        rdist[m] = L1*posA + lambda*posB;
                        dpdl[m]  = posB - posA;
                        break;
                    case erscALL:
                        /* Box relative coordinates are stored for dimensions with pbc */
                        posA *= pbc->box[m][m];
                        posB *= pbc->box[m][m];
                        for (d = m+1; d < npbcdim; d++)
                        {
                            posA += forceparams[type].posres.pos0A[d]*pbc->box[d][m];
                            posB += forceparams[type].posres.pos0B[d]*pbc->box[d][m];
                        }
                        ref      = L1*posA + lambda*posB;
                        rdist[m] = 0;
                        dpdl[m]  = posB - posA;
                        break;
                    case erscCOM:
                        ref      = L1*comA_sc[m] + lambda*comB_sc[m];
                        rdist[m] = L1*posA + lambda*posB;
                        dpdl[m]  = comB_sc[m] - comA_sc[m] + posB - posA;
                        break;
                    default:
                        gmx_fatal(FARGS, "No such scaling method implemented");
                }
            }
            else
            {
                ref      = L1*posA + lambda*posB;
                rdist[m] = 0;
                dpdl[m]  = posB - posA;
            }

            /* We do pbc_dx with ref+rdist,
             * since with only ref we can be up to half a box vector wrong.
             */
            pos[m] = ref + rdist[m];
        }

        if (pbc)
        {
            pbc_dx(pbc, x[ai], pos, dx);
        }
        else
        {
            rvec_sub(x[ai], pos, dx);
        }

        for (m = 0; (m < DIM); m++)
        {
            kk          = L1*pr->posres.fcA[m] + lambda*pr->posres.fcB[m];
            fm          = -kk*dx[m];
            vtot       += 0.5*kk*dx[m]*dx[m];
            *dvdlambda +=
                0.5*(pr->posres.fcB[m] - pr->posres.fcA[m])*dx[m]*dx[m]
                -fm*dpdl[m];

            /* Here we correct for the pbc_dx which included rdist */
            if (bForceValid)
            {
                f[ai][m]    += fm;
                vir_diag[m] -= 0.5*(dx[m] + rdist[m])*fm;
            }
        }
    }

    return vtot;
}

/* Angle restraint between the vectors i->j and k->l, or between i->j and
 * the Z axis when bZAxis is TRUE (then only three atoms are read per
 * entry).  The potential is a dopdihs_min-style cosine term on the angle
 * between the two vectors.  Returns the total energy.
 */
static real low_angres(int nbonds,
                       const t_iatom forceatoms[], const t_iparams forceparams[],
                       const rvec x[], rvec f[], rvec fshift[],
                       const t_pbc *pbc, const t_graph *g,
                       real lambda, real *dvdlambda,
                       gmx_bool bZAxis)
{
    int  i, m, type, ai, aj, ak, al;
    int  t1, t2;
    real phi, cos_phi, cos_phi2, vid, vtot, dVdphi;
    rvec r_ij, r_kl, f_i, f_k = {0, 0, 0};
    real st, sth, nrij2, nrkl2, c, cij, ckl;

    ivec dt;
    t2 = 0; /* avoid warning with gcc-3.3. It is never used uninitialized */

    vtot = 0.0;
    ak   = al = 0; /* to avoid warnings */
    for (i = 0; i < nbonds; )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        t1   = pbc_rvec_sub(pbc, x[aj], x[ai], r_ij);     /*  3 */
        if (!bZAxis)
        {
            ak = forceatoms[i++];
            al = forceatoms[i++];
            t2 = pbc_rvec_sub(pbc, x[al], x[ak], r_kl);   /*  3 */
        }
        else
        {
            r_kl[XX] = 0;
            r_kl[YY] = 0;
            r_kl[ZZ] = 1;
        }

        cos_phi = cos_angle(r_ij, r_kl); /* 25 */
        phi     = acos(cos_phi);         /* 10 */

        *dvdlambda += dopdihs_min(forceparams[type].pdihs.cpA,
                                  forceparams[type].pdihs.cpB,
                                  forceparams[type].pdihs.phiA,
                                  forceparams[type].pdihs.phiB,
                                  forceparams[type].pdihs.mult,
                                  phi, lambda, &vid, &dVdphi); /* 40 */

        vtot += vid;

        cos_phi2 = sqr(cos_phi);                     /*  1 */
        if (cos_phi2 < 1)
        {
            st    = -dVdphi*gmx_invsqrt(1 - cos_phi2); /* 12 */
            sth   = st*cos_phi;                        /*  1 */
            nrij2 = iprod(r_ij, r_ij);                 /*  5 */
            nrkl2 = iprod(r_kl, r_kl);                 /*  5 */

            c   = st*gmx_invsqrt(nrij2*nrkl2);         /* 11 */
            cij = sth/nrij2;                           /* 10 */
            ckl = sth/nrkl2;                           /* 10 */

            for (m = 0; m < DIM; m++)                  /* 18+18 */
            {
                f_i[m]    = (c*r_kl[m]-cij*r_ij[m]);
                f[ai][m] += f_i[m];
f[aj][m] -= f_i[m];
                if (!bZAxis)
                {
                    f_k[m]    = (c*r_ij[m]-ckl*r_kl[m]);
                    f[ak][m] += f_k[m];
                    f[al][m] -= f_k[m];
                }
            }

            if (g)
            {
                ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt);
                t1 = IVEC2IS(dt);
            }
            rvec_inc(fshift[t1], f_i);
            rvec_dec(fshift[CENTRAL], f_i);
            if (!bZAxis)
            {
                if (g)
                {
                    ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, al), dt);
                    t2 = IVEC2IS(dt);
                }
                rvec_inc(fshift[t2], f_k);
                rvec_dec(fshift[CENTRAL], f_k);
            }
        }
    }

    return vtot; /* 184 / 157 (bZAxis) total */
}

/* Angle restraint between two atom-pair vectors; wrapper for low_angres. */
real angres(int nbonds,
            const t_iatom forceatoms[], const t_iparams forceparams[],
            const rvec x[], rvec f[], rvec fshift[],
            const t_pbc *pbc, const t_graph *g,
            real lambda, real *dvdlambda,
            const t_mdatoms *md, t_fcdata *fcd,
            int *global_atom_index)
{
    return low_angres(nbonds, forceatoms, forceparams, x, f, fshift, pbc, g,
                      lambda, dvdlambda, FALSE);
}

/* Angle restraint against the Z axis; wrapper for low_angres. */
real angresz(int nbonds,
             const t_iatom forceatoms[], const t_iparams forceparams[],
             const rvec x[], rvec f[], rvec fshift[],
             const t_pbc *pbc, const t_graph *g,
             real lambda, real *dvdlambda,
             const t_mdatoms *md, t_fcdata *fcd,
             int *global_atom_index)
{
    return low_angres(nbonds, forceatoms, forceparams, x, f, fshift, pbc, g,
                      lambda, dvdlambda, TRUE);
}

/* Dihedral restraints: flat-bottom harmonic on the deviation of phi from
 * phi0 outside the allowed half-width dphi.  All of phi0, dphi and kfac
 * are lambda-interpolated.  Returns the total restraint energy.
 */
real dihres(int nbonds,
            const t_iatom forceatoms[], const t_iparams forceparams[],
            const rvec x[], rvec f[], rvec fshift[],
            const t_pbc *pbc, const t_graph *g,
            real lambda, real *dvdlambda,
            const t_mdatoms *md, t_fcdata *fcd,
            int *global_atom_index)
{
    real vtot = 0;
    int  ai, aj, ak, al, i, k, type, t1, t2, t3;
    real phi0A, phi0B, dphiA, dphiB, kfacA, kfacB, phi0, dphi, kfac;
    real phi, ddphi, ddp, ddp2, dp, sign, d2r, fc, L1;
    rvec r_ij, r_kj, r_kl, m, n;

    L1 = 1.0-lambda;

    d2r = DEG2RAD;
    k   = 0;

    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];
        al   = forceatoms[i++];

        phi0A = forceparams[type].dihres.phiA*d2r;
        dphiA = forceparams[type].dihres.dphiA*d2r;
        kfacA = forceparams[type].dihres.kfacA;

        phi0B = forceparams[type].dihres.phiB*d2r;
        dphiB = forceparams[type].dihres.dphiB*d2r;
        kfacB = forceparams[type].dihres.kfacB;

        phi0 = L1*phi0A + lambda*phi0B;
        dphi = L1*dphiA + lambda*dphiB;
        kfac = L1*kfacA + lambda*kfacB;

        phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
                        &sign, &t1, &t2, &t3);
        /* 84 flops */

        if (debug)
        {
            fprintf(debug, "dihres[%d]: %d %d %d %d : phi=%f, dphi=%f, kfac=%f\n",
                    k++, ai, aj, ak, al, phi0, dphi, kfac);
        }
        /* phi can jump if phi0 is close to Pi/-Pi, which will cause huge
         * force changes if we just apply a normal harmonic.
         * Instead, we first calculate phi-phi0 and take it modulo (-Pi,Pi).
         * This means we will never have the periodicity problem, unless
         * the dihedral is Pi away from phiO, which is very unlikely due to
         * the potential.
         */
        dp = phi-phi0;
        make_dp_periodic(&dp);

        /* Deviation outside the flat-bottom region [-dphi,dphi] */
        if (dp > dphi)
        {
            ddp = dp-dphi;
        }
        else if (dp < -dphi)
        {
            ddp = dp+dphi;
        }
        else
        {
            ddp = 0;
        }

        if (ddp != 0.0)
        {
            ddp2  = ddp*ddp;
            vtot += 0.5*kfac*ddp2;
            ddphi = kfac*ddp;

            *dvdlambda += 0.5*(kfacB - kfacA)*ddp2;
            /* lambda dependence from changing restraint distances */
            if (ddp > 0)
            {
                *dvdlambda -= kfac*ddp*((dphiB - dphiA)+(phi0B - phi0A));
            }
            else if (ddp < 0)
            {
                *dvdlambda += kfac*ddp*((dphiB - dphiA)-(phi0B - phi0A));
            }
            do_dih_fup(ai, aj, ak, al, ddphi, r_ij, r_kj, r_kl, m, n,
                       f, fshift, pbc, g, x, t1, t2, t3); /* 112 */
        }
    }
    return vtot;
}

/* Placeholder for interaction types without an implementation; always
 * aborts via gmx_impl().
 */
real unimplemented(int nbonds,
                   const t_iatom forceatoms[], const t_iparams forceparams[],
                   const rvec x[], rvec f[], rvec fshift[],
                   const t_pbc *pbc, const t_graph *g,
                   real lambda, real *dvdlambda,
                   const t_mdatoms *md, t_fcdata *fcd,
                   int *global_atom_index)
{
    gmx_impl("*** you are using a not implemented function");

    return 0.0; /* To make the compiler happy */
}

/* Ryckaert-Bellemans dihedrals: V = sum_{n=0..5} C_n*cos(psi)^n with the
 * polymer convention psi = phi - pi.  Coefficients are lambda-interpolated.
 * Returns the total energy.
 */
real rbdihs(int nbonds,
            const t_iatom forceatoms[], const t_iparams forceparams[],
            const rvec x[], rvec f[], rvec fshift[],
            const t_pbc *pbc, const t_graph *g,
            real lambda, real *dvdlambda,
            const t_mdatoms *md, t_fcdata *fcd,
            int *global_atom_index)
{
    const real c0 = 0.0, c1 = 1.0, c2 = 2.0, c3 = 3.0, c4 =
4.0, c5 = 5.0;
    int        type, ai, aj, ak, al, i, j;
    int        t1, t2, t3;
    rvec       r_ij, r_kj, r_kl, m, n;
    real       parmA[NR_RBDIHS];
    real       parmB[NR_RBDIHS];
    real       parm[NR_RBDIHS];
    real       cos_phi, phi, rbp, rbpBA;
    real       v, sign, ddphi, sin_phi;
    real       cosfac, vtot;
    real       L1        = 1.0-lambda;
    real       dvdl_term = 0;

    vtot = 0.0;
    for (i = 0; (i < nbonds); )
    {
        type = forceatoms[i++];
        ai   = forceatoms[i++];
        aj   = forceatoms[i++];
        ak   = forceatoms[i++];
        al   = forceatoms[i++];

        phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n,
                        &sign, &t1, &t2, &t3);  /* 84 */

        /* Change to polymer convention */
        if (phi < c0)
        {
            phi += M_PI;
        }
        else
        {
            phi -= M_PI;    /* 1 */
        }
        cos_phi = cos(phi);
        /* Beware of accuracy loss, cannot use 1-sqrt(cos^2) ! */
        sin_phi = sin(phi);

        for (j = 0; (j < NR_RBDIHS); j++)
        {
            parmA[j] = forceparams[type].rbdihs.rbcA[j];
            parmB[j] = forceparams[type].rbdihs.rbcB[j];
            parm[j]  = L1*parmA[j]+lambda*parmB[j];
        }
        /* Calculate cosine powers */
        /* Calculate the energy */
        /* Calculate the derivative */

        /* The five cos(phi) power terms are unrolled; cosfac carries the
         * running power, ddphi accumulates dV/dcos(phi).
         */
        v          = parm[0];
        dvdl_term += (parmB[0]-parmA[0]);
        ddphi      = c0;
        cosfac     = c1;

        rbp        = parm[1];
        rbpBA      = parmB[1]-parmA[1];
        ddphi     += rbp*cosfac;
        cosfac    *= cos_phi;
        v         += cosfac*rbp;
        dvdl_term += cosfac*rbpBA;
        rbp        = parm[2];
        rbpBA      = parmB[2]-parmA[2];
        ddphi     += c2*rbp*cosfac;
        cosfac    *= cos_phi;
        v         += cosfac*rbp;
        dvdl_term += cosfac*rbpBA;
        rbp        = parm[3];
        rbpBA      = parmB[3]-parmA[3];
        ddphi     += c3*rbp*cosfac;
        cosfac    *= cos_phi;
        v         += cosfac*rbp;
        dvdl_term += cosfac*rbpBA;
        rbp        = parm[4];
        rbpBA      = parmB[4]-parmA[4];
        ddphi     += c4*rbp*cosfac;
        cosfac    *= cos_phi;
        v         += cosfac*rbp;
        dvdl_term += cosfac*rbpBA;
        rbp        = parm[5];
        rbpBA      = parmB[5]-parmA[5];
        ddphi     += c5*rbp*cosfac;
        cosfac    *= cos_phi;
        v         += cosfac*rbp;
        dvdl_term += cosfac*rbpBA;

        /* Chain rule: dV/dphi = (dV/dcos(phi)) * (-sin(phi)) */
        ddphi = -ddphi*sin_phi;         /* 11 */

        do_dih_fup(ai, aj, ak, al, ddphi, r_ij, r_kj, r_kl, m, n,
                   f, fshift, pbc, g, x, t1, t2, t3); /* 112 */
        vtot += v;
    }
    *dvdlambda += dvdl_term;

    return vtot;
}

/* Wrap a CMAP grid index ip into range and return it, also providing the
 * periodic neighbour indices ip-1 (*ipm1), ip+1 (*ipp1) and ip+2 (*ipp2)
 * needed for the bicubic interpolation stencil.
 */
int cmap_setup_grid_index(int ip, int grid_spacing, int *ipm1, int *ipp1, int *ipp2)
{
    int im1, ip1, ip2;

    if (ip < 0)
    {
        ip = ip + grid_spacing - 1;
    }
    else if (ip > grid_spacing)
    {
        ip = ip - grid_spacing - 1;
    }

    im1 = ip - 1;
    ip1 = ip + 1;
    ip2 = ip + 2;

    /* Wrap the stencil indices periodically at the grid edges */
    if (ip == 0)
    {
        im1 = grid_spacing - 1;
    }
    else if (ip == grid_spacing-2)
    {
        ip2 = 0;
    }
    else if (ip == grid_spacing-1)
    {
        ip1 = 0;
        ip2 = 1;
    }

    *ipm1 = im1;
    *ipp1 = ip1;
    *ipp2 = ip2;

    return ip;
}

/* CMAP correction term: bicubic-spline interpolated energy on a 2D
 * (phi,psi) grid for two coupled dihedrals defined by five consecutive
 * atoms.  Returns the total CMAP energy.
 */
real cmap_dihs(int nbonds,
               const t_iatom forceatoms[], const t_iparams forceparams[],
               const gmx_cmap_t *cmap_grid,
               const rvec x[], rvec f[], rvec fshift[],
               const t_pbc *pbc, const t_graph *g,
               real lambda, real *dvdlambda,
               const t_mdatoms *md, t_fcdata *fcd,
               int *global_atom_index)
{
    int         i, j, k, n, idx;
    int         ai, aj, ak, al, am;
    int         a1i, a1j, a1k, a1l, a2i, a2j, a2k, a2l;
    int         type, cmapA;
    int         t11, t21, t31, t12, t22, t32;
    int         iphi1, ip1m1, ip1p1, ip1p2;
    int         iphi2, ip2m1, ip2p1, ip2p2;
    int         l1, l2, l3, l4;
    int         pos1, pos2, pos3, pos4, tmp;

    real        ty[4], ty1[4], ty2[4], ty12[4], tc[16], tx[16];
    real        phi1, psi1, cos_phi1, sin_phi1, sign1, xphi1;
    real        phi2, psi2, cos_phi2, sin_phi2, sign2, xphi2;
    real        dx, xx, tt, tu, e, df1, df2, ddf1, ddf2, ddf12, vtot;
    real        ra21, rb21, rg21, rg1, rgr1, ra2r1, rb2r1, rabr1;
    real        ra22, rb22, rg22, rg2, rgr2, ra2r2, rb2r2, rabr2;
    real        fg1, hg1, fga1, hgb1, gaa1, gbb1;
    real        fg2, hg2, fga2, hgb2, gaa2, gbb2;
    real        fac;

    rvec        r1_ij, r1_kj, r1_kl, m1, n1;
    rvec        r2_ij, r2_kj, r2_kl, m2, n2;
    rvec        f1_i, f1_j, f1_k, f1_l;
    rvec        f2_i, f2_j, f2_k, f2_l;
    rvec        a1, b1, a2, b2;
    rvec        f1, g1, h1, f2, g2, h2;
    rvec        dtf1, dtg1, dth1, dtf2, dtg2, dth2;
    ivec        jt1, dt1_ij, dt1_kj, dt1_lj;
    ivec        jt2, dt2_ij, dt2_kj, dt2_lj;

    const real *cmapd;

    int         loop_index[4][4] = {
        {0, 4, 8, 12},
        {1, 5, 9, 13},
        {2, 6, 10, 14},
        {3, 7, 11, 15}
    };

    /* Total CMAP energy */
    vtot = 0;

    for (n = 0; n < nbonds; )
    {
        /* Five atoms are involved in the two torsions */
        type = forceatoms[n++];
        ai   = forceatoms[n++];
        aj   = forceatoms[n++];
        ak   = forceatoms[n++];
        al   = forceatoms[n++];
        am   = forceatoms[n++];

        /* Which CMAP type is this */
        cmapA = forceparams[type].cmap.cmapA;
cmapd = cmap_grid->cmapdata[cmapA].cmap; /* First torsion */ a1i = ai; a1j = aj; a1k = ak; a1l = al; phi1 = dih_angle(x[a1i], x[a1j], x[a1k], x[a1l], pbc, r1_ij, r1_kj, r1_kl, m1, n1, &sign1, &t11, &t21, &t31); /* 84 */ cos_phi1 = cos(phi1); a1[0] = r1_ij[1]*r1_kj[2]-r1_ij[2]*r1_kj[1]; a1[1] = r1_ij[2]*r1_kj[0]-r1_ij[0]*r1_kj[2]; a1[2] = r1_ij[0]*r1_kj[1]-r1_ij[1]*r1_kj[0]; /* 9 */ b1[0] = r1_kl[1]*r1_kj[2]-r1_kl[2]*r1_kj[1]; b1[1] = r1_kl[2]*r1_kj[0]-r1_kl[0]*r1_kj[2]; b1[2] = r1_kl[0]*r1_kj[1]-r1_kl[1]*r1_kj[0]; /* 9 */ tmp = pbc_rvec_sub(pbc, x[a1l], x[a1k], h1); ra21 = iprod(a1, a1); /* 5 */ rb21 = iprod(b1, b1); /* 5 */ rg21 = iprod(r1_kj, r1_kj); /* 5 */ rg1 = sqrt(rg21); rgr1 = 1.0/rg1; ra2r1 = 1.0/ra21; rb2r1 = 1.0/rb21; rabr1 = sqrt(ra2r1*rb2r1); sin_phi1 = rg1 * rabr1 * iprod(a1, h1) * (-1); if (cos_phi1 < -0.5 || cos_phi1 > 0.5) { phi1 = asin(sin_phi1); if (cos_phi1 < 0) { if (phi1 > 0) { phi1 = M_PI - phi1; } else { phi1 = -M_PI - phi1; } } } else { phi1 = acos(cos_phi1); if (sin_phi1 < 0) { phi1 = -phi1; } } xphi1 = phi1 + M_PI; /* 1 */ /* Second torsion */ a2i = aj; a2j = ak; a2k = al; a2l = am; phi2 = dih_angle(x[a2i], x[a2j], x[a2k], x[a2l], pbc, r2_ij, r2_kj, r2_kl, m2, n2, &sign2, &t12, &t22, &t32); /* 84 */ cos_phi2 = cos(phi2); a2[0] = r2_ij[1]*r2_kj[2]-r2_ij[2]*r2_kj[1]; a2[1] = r2_ij[2]*r2_kj[0]-r2_ij[0]*r2_kj[2]; a2[2] = r2_ij[0]*r2_kj[1]-r2_ij[1]*r2_kj[0]; /* 9 */ b2[0] = r2_kl[1]*r2_kj[2]-r2_kl[2]*r2_kj[1]; b2[1] = r2_kl[2]*r2_kj[0]-r2_kl[0]*r2_kj[2]; b2[2] = r2_kl[0]*r2_kj[1]-r2_kl[1]*r2_kj[0]; /* 9 */ tmp = pbc_rvec_sub(pbc, x[a2l], x[a2k], h2); ra22 = iprod(a2, a2); /* 5 */ rb22 = iprod(b2, b2); /* 5 */ rg22 = iprod(r2_kj, r2_kj); /* 5 */ rg2 = sqrt(rg22); rgr2 = 1.0/rg2; ra2r2 = 1.0/ra22; rb2r2 = 1.0/rb22; rabr2 = sqrt(ra2r2*rb2r2); sin_phi2 = rg2 * rabr2 * iprod(a2, h2) * (-1); if (cos_phi2 < -0.5 || cos_phi2 > 0.5) { phi2 = asin(sin_phi2); if (cos_phi2 < 0) { if (phi2 > 0) { phi2 = M_PI - phi2; } else { phi2 = -M_PI - phi2; } } } else 
{ phi2 = acos(cos_phi2); if (sin_phi2 < 0) { phi2 = -phi2; } } xphi2 = phi2 + M_PI; /* 1 */ /* Range mangling */ if (xphi1 < 0) { xphi1 = xphi1 + 2*M_PI; } else if (xphi1 >= 2*M_PI) { xphi1 = xphi1 - 2*M_PI; } if (xphi2 < 0) { xphi2 = xphi2 + 2*M_PI; } else if (xphi2 >= 2*M_PI) { xphi2 = xphi2 - 2*M_PI; } /* Number of grid points */ dx = 2*M_PI / cmap_grid->grid_spacing; /* Where on the grid are we */ iphi1 = (int)(xphi1/dx); iphi2 = (int)(xphi2/dx); iphi1 = cmap_setup_grid_index(iphi1, cmap_grid->grid_spacing, &ip1m1, &ip1p1, &ip1p2); iphi2 = cmap_setup_grid_index(iphi2, cmap_grid->grid_spacing, &ip2m1, &ip2p1, &ip2p2); pos1 = iphi1*cmap_grid->grid_spacing+iphi2; pos2 = ip1p1*cmap_grid->grid_spacing+iphi2; pos3 = ip1p1*cmap_grid->grid_spacing+ip2p1; pos4 = iphi1*cmap_grid->grid_spacing+ip2p1; ty[0] = cmapd[pos1*4]; ty[1] = cmapd[pos2*4]; ty[2] = cmapd[pos3*4]; ty[3] = cmapd[pos4*4]; ty1[0] = cmapd[pos1*4+1]; ty1[1] = cmapd[pos2*4+1]; ty1[2] = cmapd[pos3*4+1]; ty1[3] = cmapd[pos4*4+1]; ty2[0] = cmapd[pos1*4+2]; ty2[1] = cmapd[pos2*4+2]; ty2[2] = cmapd[pos3*4+2]; ty2[3] = cmapd[pos4*4+2]; ty12[0] = cmapd[pos1*4+3]; ty12[1] = cmapd[pos2*4+3]; ty12[2] = cmapd[pos3*4+3]; ty12[3] = cmapd[pos4*4+3]; /* Switch to degrees */ dx = 360.0 / cmap_grid->grid_spacing; xphi1 = xphi1 * RAD2DEG; xphi2 = xphi2 * RAD2DEG; for (i = 0; i < 4; i++) /* 16 */ { tx[i] = ty[i]; tx[i+4] = ty1[i]*dx; tx[i+8] = ty2[i]*dx; tx[i+12] = ty12[i]*dx*dx; } idx = 0; for (i = 0; i < 4; i++) /* 1056 */ { for (j = 0; j < 4; j++) { xx = 0; for (k = 0; k < 16; k++) { xx = xx + cmap_coeff_matrix[k*16+idx]*tx[k]; } idx++; tc[i*4+j] = xx; } } tt = (xphi1-iphi1*dx)/dx; tu = (xphi2-iphi2*dx)/dx; e = 0; df1 = 0; df2 = 0; ddf1 = 0; ddf2 = 0; ddf12 = 0; for (i = 3; i >= 0; i--) { l1 = loop_index[i][3]; l2 = loop_index[i][2]; l3 = loop_index[i][1]; e = tt * e + ((tc[i*4+3]*tu+tc[i*4+2])*tu + tc[i*4+1])*tu+tc[i*4]; df1 = tu * df1 + (3.0*tc[l1]*tt+2.0*tc[l2])*tt+tc[l3]; df2 = tt * df2 + 
(3.0*tc[i*4+3]*tu+2.0*tc[i*4+2])*tu+tc[i*4+1]; ddf1 = tu * ddf1 + 2.0*3.0*tc[l1]*tt+2.0*tc[l2]; ddf2 = tt * ddf2 + 2.0*3.0*tc[4*i+3]*tu+2.0*tc[4*i+2]; } ddf12 = tc[5] + 2.0*tc[9]*tt + 3.0*tc[13]*tt*tt + 2.0*tu*(tc[6]+2.0*tc[10]*tt+3.0*tc[14]*tt*tt) + 3.0*tu*tu*(tc[7]+2.0*tc[11]*tt+3.0*tc[15]*tt*tt); fac = RAD2DEG/dx; df1 = df1 * fac; df2 = df2 * fac; ddf1 = ddf1 * fac * fac; ddf2 = ddf2 * fac * fac; ddf12 = ddf12 * fac * fac; /* CMAP energy */ vtot += e; /* Do forces - first torsion */ fg1 = iprod(r1_ij, r1_kj); hg1 = iprod(r1_kl, r1_kj); fga1 = fg1*ra2r1*rgr1; hgb1 = hg1*rb2r1*rgr1; gaa1 = -ra2r1*rg1; gbb1 = rb2r1*rg1; for (i = 0; i < DIM; i++) { dtf1[i] = gaa1 * a1[i]; dtg1[i] = fga1 * a1[i] - hgb1 * b1[i]; dth1[i] = gbb1 * b1[i]; f1[i] = df1 * dtf1[i]; g1[i] = df1 * dtg1[i]; h1[i] = df1 * dth1[i]; f1_i[i] = f1[i]; f1_j[i] = -f1[i] - g1[i]; f1_k[i] = h1[i] + g1[i]; f1_l[i] = -h1[i]; f[a1i][i] = f[a1i][i] + f1_i[i]; f[a1j][i] = f[a1j][i] + f1_j[i]; /* - f1[i] - g1[i] */ f[a1k][i] = f[a1k][i] + f1_k[i]; /* h1[i] + g1[i] */ f[a1l][i] = f[a1l][i] + f1_l[i]; /* h1[i] */ } /* Do forces - second torsion */ fg2 = iprod(r2_ij, r2_kj); hg2 = iprod(r2_kl, r2_kj); fga2 = fg2*ra2r2*rgr2; hgb2 = hg2*rb2r2*rgr2; gaa2 = -ra2r2*rg2; gbb2 = rb2r2*rg2; for (i = 0; i < DIM; i++) { dtf2[i] = gaa2 * a2[i]; dtg2[i] = fga2 * a2[i] - hgb2 * b2[i]; dth2[i] = gbb2 * b2[i]; f2[i] = df2 * dtf2[i]; g2[i] = df2 * dtg2[i]; h2[i] = df2 * dth2[i]; f2_i[i] = f2[i]; f2_j[i] = -f2[i] - g2[i]; f2_k[i] = h2[i] + g2[i]; f2_l[i] = -h2[i]; f[a2i][i] = f[a2i][i] + f2_i[i]; /* f2[i] */ f[a2j][i] = f[a2j][i] + f2_j[i]; /* - f2[i] - g2[i] */ f[a2k][i] = f[a2k][i] + f2_k[i]; /* h2[i] + g2[i] */ f[a2l][i] = f[a2l][i] + f2_l[i]; /* - h2[i] */ } /* Shift forces */ if (g) { copy_ivec(SHIFT_IVEC(g, a1j), jt1); ivec_sub(SHIFT_IVEC(g, a1i), jt1, dt1_ij); ivec_sub(SHIFT_IVEC(g, a1k), jt1, dt1_kj); ivec_sub(SHIFT_IVEC(g, a1l), jt1, dt1_lj); t11 = IVEC2IS(dt1_ij); t21 = IVEC2IS(dt1_kj); t31 = IVEC2IS(dt1_lj); 
copy_ivec(SHIFT_IVEC(g, a2j), jt2); ivec_sub(SHIFT_IVEC(g, a2i), jt2, dt2_ij); ivec_sub(SHIFT_IVEC(g, a2k), jt2, dt2_kj); ivec_sub(SHIFT_IVEC(g, a2l), jt2, dt2_lj); t12 = IVEC2IS(dt2_ij); t22 = IVEC2IS(dt2_kj); t32 = IVEC2IS(dt2_lj); } else if (pbc) { t31 = pbc_rvec_sub(pbc, x[a1l], x[a1j], h1); t32 = pbc_rvec_sub(pbc, x[a2l], x[a2j], h2); } else { t31 = CENTRAL; t32 = CENTRAL; } rvec_inc(fshift[t11], f1_i); rvec_inc(fshift[CENTRAL], f1_j); rvec_inc(fshift[t21], f1_k); rvec_inc(fshift[t31], f1_l); rvec_inc(fshift[t21], f2_i); rvec_inc(fshift[CENTRAL], f2_j); rvec_inc(fshift[t22], f2_k); rvec_inc(fshift[t32], f2_l); } return vtot; } /*********************************************************** * * G R O M O S 9 6 F U N C T I O N S * ***********************************************************/ real g96harmonic(real kA, real kB, real xA, real xB, real x, real lambda, real *V, real *F) { const real half = 0.5; real L1, kk, x0, dx, dx2; real v, f, dvdlambda; L1 = 1.0-lambda; kk = L1*kA+lambda*kB; x0 = L1*xA+lambda*xB; dx = x-x0; dx2 = dx*dx; f = -kk*dx; v = half*kk*dx2; dvdlambda = half*(kB-kA)*dx2 + (xA-xB)*kk*dx; *F = f; *V = v; return dvdlambda; /* That was 21 flops */ } real g96bonds(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, m, ki, ai, aj, type; real dr2, fbond, vbond, fij, vtot; rvec dx; ivec dt; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ki = pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */ dr2 = iprod(dx, dx); /* 5 */ *dvdlambda += g96harmonic(forceparams[type].harmonic.krA, forceparams[type].harmonic.krB, forceparams[type].harmonic.rA, forceparams[type].harmonic.rB, dr2, lambda, &vbond, &fbond); vtot += 0.5*vbond; /* 1*/ #ifdef DEBUG if (debug) { fprintf(debug, "G96-BONDS: dr = %10g vbond = %10g 
fbond = %10g\n", sqrt(dr2), vbond, fbond); } #endif if (g) { ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt); ki = IVEC2IS(dt); } for (m = 0; (m < DIM); m++) /* 15 */ { fij = fbond*dx[m]; f[ai][m] += fij; f[aj][m] -= fij; fshift[ki][m] += fij; fshift[CENTRAL][m] -= fij; } } /* 44 TOTAL */ return vtot; } real g96bond_angle(const rvec xi, const rvec xj, const rvec xk, const t_pbc *pbc, rvec r_ij, rvec r_kj, int *t1, int *t2) /* Return value is the angle between the bonds i-j and j-k */ { real costh; *t1 = pbc_rvec_sub(pbc, xi, xj, r_ij); /* 3 */ *t2 = pbc_rvec_sub(pbc, xk, xj, r_kj); /* 3 */ costh = cos_angle(r_ij, r_kj); /* 25 */ /* 41 TOTAL */ return costh; } real g96angles(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, ai, aj, ak, type, m, t1, t2; rvec r_ij, r_kj; real cos_theta, dVdt, va, vtot; real rij_1, rij_2, rkj_1, rkj_2, rijrkj_1; rvec f_i, f_j, f_k; ivec jt, dt_ij, dt_kj; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ak = forceatoms[i++]; cos_theta = g96bond_angle(x[ai], x[aj], x[ak], pbc, r_ij, r_kj, &t1, &t2); *dvdlambda += g96harmonic(forceparams[type].harmonic.krA, forceparams[type].harmonic.krB, forceparams[type].harmonic.rA, forceparams[type].harmonic.rB, cos_theta, lambda, &va, &dVdt); vtot += va; rij_1 = gmx_invsqrt(iprod(r_ij, r_ij)); rkj_1 = gmx_invsqrt(iprod(r_kj, r_kj)); rij_2 = rij_1*rij_1; rkj_2 = rkj_1*rkj_1; rijrkj_1 = rij_1*rkj_1; /* 23 */ #ifdef DEBUG if (debug) { fprintf(debug, "G96ANGLES: costheta = %10g vth = %10g dV/dct = %10g\n", cos_theta, va, dVdt); } #endif for (m = 0; (m < DIM); m++) /* 42 */ { f_i[m] = dVdt*(r_kj[m]*rijrkj_1 - r_ij[m]*rij_2*cos_theta); f_k[m] = dVdt*(r_ij[m]*rijrkj_1 - r_kj[m]*rkj_2*cos_theta); f_j[m] = -f_i[m]-f_k[m]; f[ai][m] += f_i[m]; f[aj][m] += 
f_j[m]; f[ak][m] += f_k[m]; } if (g) { copy_ivec(SHIFT_IVEC(g, aj), jt); ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij); ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj); t1 = IVEC2IS(dt_ij); t2 = IVEC2IS(dt_kj); } rvec_inc(fshift[t1], f_i); rvec_inc(fshift[CENTRAL], f_j); rvec_inc(fshift[t2], f_k); /* 9 */ /* 163 TOTAL */ } return vtot; } real cross_bond_bond(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { /* Potential from Lawrence and Skimmer, Chem. Phys. Lett. 372 (2003) * pp. 842-847 */ int i, ai, aj, ak, type, m, t1, t2; rvec r_ij, r_kj; real vtot, vrr, s1, s2, r1, r2, r1e, r2e, krr; rvec f_i, f_j, f_k; ivec jt, dt_ij, dt_kj; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ak = forceatoms[i++]; r1e = forceparams[type].cross_bb.r1e; r2e = forceparams[type].cross_bb.r2e; krr = forceparams[type].cross_bb.krr; /* Compute distance vectors ... */ t1 = pbc_rvec_sub(pbc, x[ai], x[aj], r_ij); t2 = pbc_rvec_sub(pbc, x[ak], x[aj], r_kj); /* ... and their lengths */ r1 = norm(r_ij); r2 = norm(r_kj); /* Deviations from ideality */ s1 = r1-r1e; s2 = r2-r2e; /* Energy (can be negative!) 
*/ vrr = krr*s1*s2; vtot += vrr; /* Forces */ svmul(-krr*s2/r1, r_ij, f_i); svmul(-krr*s1/r2, r_kj, f_k); for (m = 0; (m < DIM); m++) /* 12 */ { f_j[m] = -f_i[m] - f_k[m]; f[ai][m] += f_i[m]; f[aj][m] += f_j[m]; f[ak][m] += f_k[m]; } /* Virial stuff */ if (g) { copy_ivec(SHIFT_IVEC(g, aj), jt); ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij); ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj); t1 = IVEC2IS(dt_ij); t2 = IVEC2IS(dt_kj); } rvec_inc(fshift[t1], f_i); rvec_inc(fshift[CENTRAL], f_j); rvec_inc(fshift[t2], f_k); /* 9 */ /* 163 TOTAL */ } return vtot; } real cross_bond_angle(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { /* Potential from Lawrence and Skimmer, Chem. Phys. Lett. 372 (2003) * pp. 842-847 */ int i, ai, aj, ak, type, m, t1, t2, t3; rvec r_ij, r_kj, r_ik; real vtot, vrt, s1, s2, s3, r1, r2, r3, r1e, r2e, r3e, krt, k1, k2, k3; rvec f_i, f_j, f_k; ivec jt, dt_ij, dt_kj; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ak = forceatoms[i++]; r1e = forceparams[type].cross_ba.r1e; r2e = forceparams[type].cross_ba.r2e; r3e = forceparams[type].cross_ba.r3e; krt = forceparams[type].cross_ba.krt; /* Compute distance vectors ... */ t1 = pbc_rvec_sub(pbc, x[ai], x[aj], r_ij); t2 = pbc_rvec_sub(pbc, x[ak], x[aj], r_kj); t3 = pbc_rvec_sub(pbc, x[ai], x[ak], r_ik); /* ... and their lengths */ r1 = norm(r_ij); r2 = norm(r_kj); r3 = norm(r_ik); /* Deviations from ideality */ s1 = r1-r1e; s2 = r2-r2e; s3 = r3-r3e; /* Energy (can be negative!) 
*/ vrt = krt*s3*(s1+s2); vtot += vrt; /* Forces */ k1 = -krt*(s3/r1); k2 = -krt*(s3/r2); k3 = -krt*(s1+s2)/r3; for (m = 0; (m < DIM); m++) { f_i[m] = k1*r_ij[m] + k3*r_ik[m]; f_k[m] = k2*r_kj[m] - k3*r_ik[m]; f_j[m] = -f_i[m] - f_k[m]; } for (m = 0; (m < DIM); m++) /* 12 */ { f[ai][m] += f_i[m]; f[aj][m] += f_j[m]; f[ak][m] += f_k[m]; } /* Virial stuff */ if (g) { copy_ivec(SHIFT_IVEC(g, aj), jt); ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij); ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj); t1 = IVEC2IS(dt_ij); t2 = IVEC2IS(dt_kj); } rvec_inc(fshift[t1], f_i); rvec_inc(fshift[CENTRAL], f_j); rvec_inc(fshift[t2], f_k); /* 9 */ /* 163 TOTAL */ } return vtot; } static real bonded_tab(const char *type, int table_nr, const bondedtable_t *table, real kA, real kB, real r, real lambda, real *V, real *F) { real k, tabscale, *VFtab, rt, eps, eps2, Yt, Ft, Geps, Heps2, Fp, VV, FF; int n0, nnn; real v, f, dvdlambda; k = (1.0 - lambda)*kA + lambda*kB; tabscale = table->scale; VFtab = table->data; rt = r*tabscale; n0 = rt; if (n0 >= table->n) { gmx_fatal(FARGS, "A tabulated %s interaction table number %d is out of the table range: r %f, between table indices %d and %d, table length %d", type, table_nr, r, n0, n0+1, table->n); } eps = rt - n0; eps2 = eps*eps; nnn = 4*n0; Yt = VFtab[nnn]; Ft = VFtab[nnn+1]; Geps = VFtab[nnn+2]*eps; Heps2 = VFtab[nnn+3]*eps2; Fp = Ft + Geps + Heps2; VV = Yt + Fp*eps; FF = Fp + Geps + 2.0*Heps2; *F = -k*FF*tabscale; *V = k*VV; dvdlambda = (kB - kA)*VV; return dvdlambda; /* That was 22 flops */ } real tab_bonds(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, m, ki, ai, aj, type, table; real dr, dr2, fbond, vbond, fij, vtot; rvec dx; ivec dt; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ki = 
pbc_rvec_sub(pbc, x[ai], x[aj], dx); /* 3 */ dr2 = iprod(dx, dx); /* 5 */ dr = dr2*gmx_invsqrt(dr2); /* 10 */ table = forceparams[type].tab.table; *dvdlambda += bonded_tab("bond", table, &fcd->bondtab[table], forceparams[type].tab.kA, forceparams[type].tab.kB, dr, lambda, &vbond, &fbond); /* 22 */ if (dr2 == 0.0) { continue; } vtot += vbond; /* 1*/ fbond *= gmx_invsqrt(dr2); /* 6 */ #ifdef DEBUG if (debug) { fprintf(debug, "TABBONDS: dr = %10g vbond = %10g fbond = %10g\n", dr, vbond, fbond); } #endif if (g) { ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), dt); ki = IVEC2IS(dt); } for (m = 0; (m < DIM); m++) /* 15 */ { fij = fbond*dx[m]; f[ai][m] += fij; f[aj][m] -= fij; fshift[ki][m] += fij; fshift[CENTRAL][m] -= fij; } } /* 62 TOTAL */ return vtot; } real tab_angles(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, ai, aj, ak, t1, t2, type, table; rvec r_ij, r_kj; real cos_theta, cos_theta2, theta, dVdt, va, vtot; ivec jt, dt_ij, dt_kj; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ak = forceatoms[i++]; theta = bond_angle(x[ai], x[aj], x[ak], pbc, r_ij, r_kj, &cos_theta, &t1, &t2); /* 41 */ table = forceparams[type].tab.table; *dvdlambda += bonded_tab("angle", table, &fcd->angletab[table], forceparams[type].tab.kA, forceparams[type].tab.kB, theta, lambda, &va, &dVdt); /* 22 */ vtot += va; cos_theta2 = sqr(cos_theta); /* 1 */ if (cos_theta2 < 1) { int m; real snt, st, sth; real cik, cii, ckk; real nrkj2, nrij2; rvec f_i, f_j, f_k; st = dVdt*gmx_invsqrt(1 - cos_theta2); /* 12 */ sth = st*cos_theta; /* 1 */ #ifdef DEBUG if (debug) { fprintf(debug, "ANGLES: theta = %10g vth = %10g dV/dtheta = %10g\n", theta*RAD2DEG, va, dVdt); } #endif nrkj2 = iprod(r_kj, r_kj); /* 5 */ nrij2 = iprod(r_ij, r_ij); cik = 
st*gmx_invsqrt(nrkj2*nrij2); /* 12 */ cii = sth/nrij2; /* 10 */ ckk = sth/nrkj2; /* 10 */ for (m = 0; (m < DIM); m++) /* 39 */ { f_i[m] = -(cik*r_kj[m]-cii*r_ij[m]); f_k[m] = -(cik*r_ij[m]-ckk*r_kj[m]); f_j[m] = -f_i[m]-f_k[m]; f[ai][m] += f_i[m]; f[aj][m] += f_j[m]; f[ak][m] += f_k[m]; } if (g) { copy_ivec(SHIFT_IVEC(g, aj), jt); ivec_sub(SHIFT_IVEC(g, ai), jt, dt_ij); ivec_sub(SHIFT_IVEC(g, ak), jt, dt_kj); t1 = IVEC2IS(dt_ij); t2 = IVEC2IS(dt_kj); } rvec_inc(fshift[t1], f_i); rvec_inc(fshift[CENTRAL], f_j); rvec_inc(fshift[t2], f_k); } /* 169 TOTAL */ } return vtot; } real tab_dihs(int nbonds, const t_iatom forceatoms[], const t_iparams forceparams[], const rvec x[], rvec f[], rvec fshift[], const t_pbc *pbc, const t_graph *g, real lambda, real *dvdlambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, type, ai, aj, ak, al, table; int t1, t2, t3; rvec r_ij, r_kj, r_kl, m, n; real phi, sign, ddphi, vpd, vtot; vtot = 0.0; for (i = 0; (i < nbonds); ) { type = forceatoms[i++]; ai = forceatoms[i++]; aj = forceatoms[i++]; ak = forceatoms[i++]; al = forceatoms[i++]; phi = dih_angle(x[ai], x[aj], x[ak], x[al], pbc, r_ij, r_kj, r_kl, m, n, &sign, &t1, &t2, &t3); /* 84 */ table = forceparams[type].tab.table; /* Hopefully phi+M_PI never results in values < 0 */ *dvdlambda += bonded_tab("dihedral", table, &fcd->dihtab[table], forceparams[type].tab.kA, forceparams[type].tab.kB, phi+M_PI, lambda, &vpd, &ddphi); vtot += vpd; do_dih_fup(ai, aj, ak, al, -ddphi, r_ij, r_kj, r_kl, m, n, f, fshift, pbc, g, x, t1, t2, t3); /* 112 */ #ifdef DEBUG fprintf(debug, "pdih: (%d,%d,%d,%d) phi=%g\n", ai, aj, ak, al, phi); #endif } /* 227 TOTAL */ return vtot; } /* Return if this is a potential calculated in bondfree.c, * i.e. an interaction that actually calculates a potential and * works on multiple atoms (not e.g. a connection or a position restraint). 
*/ static gmx_inline gmx_bool ftype_is_bonded_potential(int ftype) { return (interaction_function[ftype].flags & IF_BOND) && !(ftype == F_CONNBONDS || ftype == F_POSRES) && (ftype < F_GB12 || ftype > F_GB14); } static void divide_bondeds_over_threads(t_idef *idef, int nthreads) { int ftype; int nat1; int t; int il_nr_thread; idef->nthreads = nthreads; if (F_NRE*(nthreads+1) > idef->il_thread_division_nalloc) { idef->il_thread_division_nalloc = F_NRE*(nthreads+1); snew(idef->il_thread_division, idef->il_thread_division_nalloc); } for (ftype = 0; ftype < F_NRE; ftype++) { if (ftype_is_bonded_potential(ftype)) { nat1 = interaction_function[ftype].nratoms + 1; for (t = 0; t <= nthreads; t++) { /* Divide the interactions equally over the threads. * When the different types of bonded interactions * are distributed roughly equally over the threads, * this should lead to well localized output into * the force buffer on each thread. * If this is not the case, a more advanced scheme * (not implemented yet) will do better. */ il_nr_thread = (((idef->il[ftype].nr/nat1)*t)/nthreads)*nat1; /* Ensure that distance restraint pairs with the same label * end up on the same thread. * This is slighlty tricky code, since the next for iteration * may have an initial il_nr_thread lower than the final value * in the previous iteration, but this will anyhow be increased * to the approriate value again by this while loop. 
*/ while (ftype == F_DISRES && il_nr_thread > 0 && il_nr_thread < idef->il[ftype].nr && idef->iparams[idef->il[ftype].iatoms[il_nr_thread]].disres.label == idef->iparams[idef->il[ftype].iatoms[il_nr_thread-nat1]].disres.label) { il_nr_thread += nat1; } idef->il_thread_division[ftype*(nthreads+1)+t] = il_nr_thread; } } } } static unsigned calc_bonded_reduction_mask(const t_idef *idef, int shift, int t, int nt) { unsigned mask; int ftype, nb, nat1, nb0, nb1, i, a; mask = 0; for (ftype = 0; ftype < F_NRE; ftype++) { if (ftype_is_bonded_potential(ftype)) { nb = idef->il[ftype].nr; if (nb > 0) { nat1 = interaction_function[ftype].nratoms + 1; /* Divide this interaction equally over the threads. * This is not stored: should match division in calc_bonds. */ nb0 = idef->il_thread_division[ftype*(nt+1)+t]; nb1 = idef->il_thread_division[ftype*(nt+1)+t+1]; for (i = nb0; i < nb1; i += nat1) { for (a = 1; a < nat1; a++) { mask |= (1U << (idef->il[ftype].iatoms[i+a]>>shift)); } } } } } return mask; } void setup_bonded_threading(t_forcerec *fr, t_idef *idef) { #define MAX_BLOCK_BITS 32 int t; int ctot, c, b; #ifndef NDEBUG assert(fr->nthreads >= 1); #endif /* Divide the bonded interaction over the threads */ divide_bondeds_over_threads(idef, fr->nthreads); if (fr->nthreads == 1) { fr->red_nblock = 0; return; } /* We divide the force array in a maximum of 32 blocks. * Minimum force block reduction size is 2^6=64. */ fr->red_ashift = 6; while (fr->natoms_force > (int)(MAX_BLOCK_BITS*(1U<<fr->red_ashift))) { fr->red_ashift++; } if (debug) { fprintf(debug, "bonded force buffer block atom shift %d bits\n", fr->red_ashift); } /* Determine to which blocks each thread's bonded force calculation * contributes. Store this is a mask for each thread. 
*/ #pragma omp parallel for num_threads(fr->nthreads) schedule(static) for (t = 1; t < fr->nthreads; t++) { fr->f_t[t].red_mask = calc_bonded_reduction_mask(idef, fr->red_ashift, t, fr->nthreads); } /* Determine the maximum number of blocks we need to reduce over */ fr->red_nblock = 0; ctot = 0; for (t = 0; t < fr->nthreads; t++) { c = 0; for (b = 0; b < MAX_BLOCK_BITS; b++) { if (fr->f_t[t].red_mask & (1U<<b)) { fr->red_nblock = max(fr->red_nblock, b+1); c++; } } if (debug) { fprintf(debug, "thread %d flags %x count %d\n", t, fr->f_t[t].red_mask, c); } ctot += c; } if (debug) { fprintf(debug, "Number of blocks to reduce: %d of size %d\n", fr->red_nblock, 1<<fr->red_ashift); fprintf(debug, "Reduction density %.2f density/#thread %.2f\n", ctot*(1<<fr->red_ashift)/(double)fr->natoms_force, ctot*(1<<fr->red_ashift)/(double)(fr->natoms_force*fr->nthreads)); } } static void zero_thread_forces(f_thread_t *f_t, int n, int nblock, int blocksize) { int b, a0, a1, a, i, j; if (n > f_t->f_nalloc) { f_t->f_nalloc = over_alloc_large(n); srenew(f_t->f, f_t->f_nalloc); } if (f_t->red_mask != 0) { for (b = 0; b < nblock; b++) { if (f_t->red_mask && (1U<<b)) { a0 = b*blocksize; a1 = min((b+1)*blocksize, n); for (a = a0; a < a1; a++) { clear_rvec(f_t->f[a]); } } } } for (i = 0; i < SHIFTS; i++) { clear_rvec(f_t->fshift[i]); } for (i = 0; i < F_NRE; i++) { f_t->ener[i] = 0; } for (i = 0; i < egNR; i++) { for (j = 0; j < f_t->grpp.nener; j++) { f_t->grpp.ener[i][j] = 0; } } for (i = 0; i < efptNR; i++) { f_t->dvdl[i] = 0; } } static void reduce_thread_force_buffer(int n, rvec *f, int nthreads, f_thread_t *f_t, int nblock, int block_size) { /* The max thread number is arbitrary, * we used a fixed number to avoid memory management. * Using more than 16 threads is probably never useful performance wise. 
*/ #define MAX_BONDED_THREADS 256 int b; if (nthreads > MAX_BONDED_THREADS) { gmx_fatal(FARGS, "Can not reduce bonded forces on more than %d threads", MAX_BONDED_THREADS); } /* This reduction can run on any number of threads, * independently of nthreads. */ #pragma omp parallel for num_threads(nthreads) schedule(static) for (b = 0; b < nblock; b++) { rvec *fp[MAX_BONDED_THREADS]; int nfb, ft, fb; int a0, a1, a; /* Determine which threads contribute to this block */ nfb = 0; for (ft = 1; ft < nthreads; ft++) { if (f_t[ft].red_mask & (1U<<b)) { fp[nfb++] = f_t[ft].f; } } if (nfb > 0) { /* Reduce force buffers for threads that contribute */ a0 = b *block_size; a1 = (b+1)*block_size; a1 = min(a1, n); for (a = a0; a < a1; a++) { for (fb = 0; fb < nfb; fb++) { rvec_inc(f[a], fp[fb][a]); } } } } } static void reduce_thread_forces(int n, rvec *f, rvec *fshift, real *ener, gmx_grppairener_t *grpp, real *dvdl, int nthreads, f_thread_t *f_t, int nblock, int block_size, gmx_bool bCalcEnerVir, gmx_bool bDHDL) { if (nblock > 0) { /* Reduce the bonded force buffer */ reduce_thread_force_buffer(n, f, nthreads, f_t, nblock, block_size); } /* When necessary, reduce energy and virial using one thread only */ if (bCalcEnerVir) { int t, i, j; for (i = 0; i < SHIFTS; i++) { for (t = 1; t < nthreads; t++) { rvec_inc(fshift[i], f_t[t].fshift[i]); } } for (i = 0; i < F_NRE; i++) { for (t = 1; t < nthreads; t++) { ener[i] += f_t[t].ener[i]; } } for (i = 0; i < egNR; i++) { for (j = 0; j < f_t[1].grpp.nener; j++) { for (t = 1; t < nthreads; t++) { grpp->ener[i][j] += f_t[t].grpp.ener[i][j]; } } } if (bDHDL) { for (i = 0; i < efptNR; i++) { for (t = 1; t < nthreads; t++) { dvdl[i] += f_t[t].dvdl[i]; } } } } } static real calc_one_bond(FILE *fplog, int thread, int ftype, const t_idef *idef, rvec x[], rvec f[], rvec fshift[], t_forcerec *fr, const t_pbc *pbc, const t_graph *g, gmx_grppairener_t *grpp, t_nrnb *nrnb, real *lambda, real *dvdl, const t_mdatoms *md, t_fcdata *fcd, gmx_bool 
bCalcEnerVir, int *global_atom_index, gmx_bool bPrintSepPot) { int nat1, nbonds, efptFTYPE; real v = 0; t_iatom *iatoms; int nb0, nbn; if (IS_RESTRAINT_TYPE(ftype)) { efptFTYPE = efptRESTRAINT; } else { efptFTYPE = efptBONDED; } nat1 = interaction_function[ftype].nratoms + 1; nbonds = idef->il[ftype].nr/nat1; iatoms = idef->il[ftype].iatoms; nb0 = idef->il_thread_division[ftype*(idef->nthreads+1)+thread]; nbn = idef->il_thread_division[ftype*(idef->nthreads+1)+thread+1] - nb0; if (!IS_LISTED_LJ_C(ftype)) { if (ftype == F_CMAP) { v = cmap_dihs(nbn, iatoms+nb0, idef->iparams, &idef->cmap_grid, (const rvec*)x, f, fshift, pbc, g, lambda[efptFTYPE], &(dvdl[efptFTYPE]), md, fcd, global_atom_index); } #ifdef SIMD_BONDEDS else if (ftype == F_ANGLES && !bCalcEnerVir && fr->efep == efepNO) { /* No energies, shift forces, dvdl */ angles_noener_simd(nbn, idef->il[ftype].iatoms+nb0, idef->iparams, (const rvec*)x, f, pbc, g, lambda[efptFTYPE], md, fcd, global_atom_index); v = 0; } #endif else if (ftype == F_PDIHS && !bCalcEnerVir && fr->efep == efepNO) { /* No energies, shift forces, dvdl */ #ifndef SIMD_BONDEDS pdihs_noener #else pdihs_noener_simd #endif (nbn, idef->il[ftype].iatoms+nb0, idef->iparams, (const rvec*)x, f, pbc, g, lambda[efptFTYPE], md, fcd, global_atom_index); v = 0; } else { v = interaction_function[ftype].ifunc(nbn, iatoms+nb0, idef->iparams, (const rvec*)x, f, fshift, pbc, g, lambda[efptFTYPE], &(dvdl[efptFTYPE]), md, fcd, global_atom_index); } if (bPrintSepPot) { fprintf(fplog, " %-23s #%4d V %12.5e dVdl %12.5e\n", interaction_function[ftype].longname, nbonds, v, lambda[efptFTYPE]); } } else { v = do_nonbonded_listed(ftype, nbn, iatoms+nb0, idef->iparams, (const rvec*)x, f, fshift, pbc, g, lambda, dvdl, md, fr, grpp, global_atom_index); if (bPrintSepPot) { fprintf(fplog, " %-5s + %-15s #%4d dVdl %12.5e\n", interaction_function[ftype].longname, interaction_function[F_LJ14].longname, nbonds, dvdl[efptVDW]); fprintf(fplog, " %-5s + %-15s #%4d dVdl %12.5e\n", 
interaction_function[ftype].longname, interaction_function[F_COUL14].longname, nbonds, dvdl[efptCOUL]); } } if (thread == 0) { inc_nrnb(nrnb, interaction_function[ftype].nrnb_ind, nbonds); } return v; } void calc_bonds(FILE *fplog, const gmx_multisim_t *ms, const t_idef *idef, rvec x[], history_t *hist, rvec f[], t_forcerec *fr, const t_pbc *pbc, const t_graph *g, gmx_enerdata_t *enerd, t_nrnb *nrnb, real *lambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index, t_atomtypes *atype, gmx_genborn_t *born, int force_flags, gmx_bool bPrintSepPot, gmx_large_int_t step) { gmx_bool bCalcEnerVir; int i; real v, dvdl[efptNR], dvdl_dum[efptNR]; /* The dummy array is to have a place to store the dhdl at other values of lambda, which will be thrown away in the end*/ const t_pbc *pbc_null; char buf[22]; int thread; #ifndef NDEBUG assert(fr->nthreads == idef->nthreads); #endif bCalcEnerVir = (force_flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)); for (i = 0; i < efptNR; i++) { dvdl[i] = 0.0; } if (fr->bMolPBC) { pbc_null = pbc; } else { pbc_null = NULL; } if (bPrintSepPot) { fprintf(fplog, "Step %s: bonded V and dVdl for this node\n", gmx_step_str(step, buf)); } #ifdef DEBUG if (g && debug) { p_graph(debug, "Bondage is fun", g); } #endif /* Do pre force calculation stuff which might require communication */ if (idef->il[F_ORIRES].nr) { enerd->term[F_ORIRESDEV] = calc_orires_dev(ms, idef->il[F_ORIRES].nr, idef->il[F_ORIRES].iatoms, idef->iparams, md, (const rvec*)x, pbc_null, fcd, hist); } if (idef->il[F_DISRES].nr) { calc_disres_R_6(ms, idef->il[F_DISRES].nr, idef->il[F_DISRES].iatoms, idef->iparams, (const rvec*)x, pbc_null, fcd, hist); } #pragma omp parallel for num_threads(fr->nthreads) schedule(static) for (thread = 0; thread < fr->nthreads; thread++) { int ftype; real *epot, v; /* thread stuff */ rvec *ft, *fshift; real *dvdlt; gmx_grppairener_t *grpp; if (thread == 0) { ft = f; fshift = fr->fshift; epot = enerd->term; grpp = &enerd->grpp; dvdlt = dvdl; } else { 
zero_thread_forces(&fr->f_t[thread], fr->natoms_force, fr->red_nblock, 1<<fr->red_ashift); ft = fr->f_t[thread].f; fshift = fr->f_t[thread].fshift; epot = fr->f_t[thread].ener; grpp = &fr->f_t[thread].grpp; dvdlt = fr->f_t[thread].dvdl; } /* Loop over all bonded force types to calculate the bonded forces */ for (ftype = 0; (ftype < F_NRE); ftype++) { if (idef->il[ftype].nr > 0 && ftype_is_bonded_potential(ftype)) { v = calc_one_bond(fplog, thread, ftype, idef, x, ft, fshift, fr, pbc_null, g, grpp, nrnb, lambda, dvdlt, md, fcd, bCalcEnerVir, global_atom_index, bPrintSepPot); epot[ftype] += v; } } } if (fr->nthreads > 1) { reduce_thread_forces(fr->natoms_force, f, fr->fshift, enerd->term, &enerd->grpp, dvdl, fr->nthreads, fr->f_t, fr->red_nblock, 1<<fr->red_ashift, bCalcEnerVir, force_flags & GMX_FORCE_DHDL); } if (force_flags & GMX_FORCE_DHDL) { for (i = 0; i < efptNR; i++) { enerd->dvdl_nonlin[i] += dvdl[i]; } } /* Copy the sum of violations for the distance restraints from fcd */ if (fcd) { enerd->term[F_DISRESVIOL] = fcd->disres.sumviol; } } void calc_bonds_lambda(FILE *fplog, const t_idef *idef, rvec x[], t_forcerec *fr, const t_pbc *pbc, const t_graph *g, gmx_grppairener_t *grpp, real *epot, t_nrnb *nrnb, real *lambda, const t_mdatoms *md, t_fcdata *fcd, int *global_atom_index) { int i, ftype, nr_nonperturbed, nr; real v; real dvdl_dum[efptNR]; rvec *f, *fshift; const t_pbc *pbc_null; t_idef idef_fe; if (fr->bMolPBC) { pbc_null = pbc; } else { pbc_null = NULL; } /* Copy the whole idef, so we can modify the contents locally */ idef_fe = *idef; idef_fe.nthreads = 1; snew(idef_fe.il_thread_division, F_NRE*(idef_fe.nthreads+1)); /* We already have the forces, so we use temp buffers here */ snew(f, fr->natoms_force); snew(fshift, SHIFTS); /* Loop over all bonded force types to calculate the bonded energies */ for (ftype = 0; (ftype < F_NRE); ftype++) { if (ftype_is_bonded_potential(ftype)) { /* Set the work range of thread 0 to the perturbed bondeds only */ 
nr_nonperturbed = idef->il[ftype].nr_nonperturbed; nr = idef->il[ftype].nr; idef_fe.il_thread_division[ftype*2+0] = nr_nonperturbed; idef_fe.il_thread_division[ftype*2+1] = nr; /* This is only to get the flop count correct */ idef_fe.il[ftype].nr = nr - nr_nonperturbed; if (nr - nr_nonperturbed > 0) { v = calc_one_bond(fplog, 0, ftype, &idef_fe, x, f, fshift, fr, pbc_null, g, grpp, nrnb, lambda, dvdl_dum, md, fcd, TRUE, global_atom_index, FALSE); epot[ftype] += v; } } } sfree(fshift); sfree(f); sfree(idef_fe.il_thread_division); }
GB_unop__identity_int16_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:   GB (_unop_apply__identity_int16_fc32)
// op(A') function:  GB (_unop_tran__identity_int16_fc32)

// C type:  int16_t
// A type:  GxB_FC32_t
// cast:    int16_t cij = GB_cast_to_int16_t ((double) crealf (aij))
// unaryop: cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: identity op, so the cast (real part of the complex aij, truncated
// to int16 with saturation by GB_cast_to_int16_t) is the entire computation
#define GB_CAST(z, aij) \
    int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity op entry-wise over anz entries, parallelized with a
// static OpenMP schedule.  Cx and Ax may alias because each entry p is read
// before it is written and entries are independent.

GrB_Info GB (_unop_apply__identity_int16_fc32)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            int16_t z = GB_cast_to_int16_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unop_transpose.c; it is specialized
// here via the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_int16_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolutiondepthwise_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int w = bottom_blob.w; 
//int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
Fdtd.h
#pragma once #include "Constants.h" #include "FieldSolver.h" #include "Grid.h" #include "PmlFdtd.h" #include "Vectors.h" #include <algorithm> namespace pfc { class FDTD : public RealFieldSolver<YeeGridType> { public: FDTD(YeeGrid* grid, FP dt); void updateFields(); void setPML(int sizePMLx, int sizePMLy, int sizePMLz); void setFieldGenerator(FieldGeneratorYee * _generator); void updateHalfB(); void updateE(); void setTimeStep(FP dt); FP getCourantCondition() const { double tmp = sqrt(1.0 / (grid->steps.x*grid->steps.x) + 1.0 / (grid->steps.y*grid->steps.y) + 1.0 / (grid->steps.z*grid->steps.z)); return 1.0 / (constants::c * tmp); } bool ifCourantConditionSatisfied(FP dt) const { return dt < getCourantCondition(); } private: void updateHalfB3D(); void updateHalfB2D(); void updateHalfB1D(); void updateE3D(); void updateE2D(); void updateE1D(); FP3 anisotropyCoeff; void setAnisotropy(const FP frequency, int axis); }; inline FDTD::FDTD(YeeGrid* grid, FP dt) : RealFieldSolver(grid, dt, 0.0, 0.5*dt, 0.0) { if (!ifCourantConditionSatisfied(dt)) { std::cout << "WARNING: FDTD Courant condition is not satisfied. Another time step was setted up" << std::endl; this->dt = getCourantCondition() * 0.5; } updateDims(); pml.reset(new Pml<GridTypes::YeeGridType>(this, Int3(0, 0, 0)));//pml.reset(new PmlFdtd(this));; generator.reset(new ReflectFieldGeneratorYee(this)); updateInternalDims(); anisotropyCoeff = FP3(1, 1, 1); } inline void FDTD::setPML(int sizePMLx, int sizePMLy, int sizePMLz) { pml.reset(new PmlFdtd(this, Int3(sizePMLx, sizePMLy, sizePMLz))); updateInternalDims(); } inline void FDTD::setTimeStep(FP dt) { if (ifCourantConditionSatisfied(dt)) { this->dt = dt; this->timeShiftB = 0.5*dt; if (pml->sizePML == Int3(0, 0, 0)) pml.reset(new Pml<GridTypes::YeeGridType>(this, Int3(0, 0, 0))); else pml.reset(new PmlFdtd(this, pml->sizePML)); generator.reset(generator->createInstance(this)); } else { std::cout << "WARNING: FDTD Courant condition is not satisfied. 
Time step was not changed" << std::endl; } } inline void FDTD::setFieldGenerator(FieldGeneratorYee * _generator) { generator.reset(_generator); } inline void FDTD::setAnisotropy(FP frequency, int axis) { // We introduce artificial anisotropy, through one axis. // For this we upgrade Maxwell equations by coefficients, // which computes from major signal frequency. // See more in Juntunen,Tsiboukis - Reduction of Numerical Dispersion in // FDTD Method Through Artificial Anisotropy. FP3 steps = grid->steps; FP WP = constants::pi * 2.0 * constants::c / frequency; FP R = WP / steps.norm(); const FP q = 0.99; // q - stability coefficient, 0 <= q <= 1 FP Amax = constants::pi / (3 * R * asin(asin(constants::pi / (R * sqrt(3.0))) / sqrt(3.0))); FP Q = Amax - 1; FP c1 = 1 - Q / 2; int axis0 = axis; int axis1 = (axis + 1) % 3; int axis2 = (axis + 2) % 3; // equivalents of the variables // Z1 == Zy, Zz == Zz // Zy,Zz - designation from article FP Z1 = steps[axis0] / steps[axis1]; FP Z2 = steps[axis0] / steps[axis2]; // equivalents of the variables // CoeffA == K1, CoeffB == K2, a1 == a, a2 == b // K1, K2, a, b - designation from article FP CoeffA = constants::pi / (R * sqrt(1 + 1 / (Z1 * Z1) + 1 / (Z2 * Z2))); FP a1 = sin(CoeffA / c1) * sin(CoeffA / c1) / (Z1 * Z1 * sin(CoeffA / (c1 * Z1)) * sin(CoeffA / (c1 * Z1))); FP a2 = sin(CoeffA / c1) * sin(CoeffA / c1) / (Z2 * Z2 * sin(CoeffA / (c1 * Z2)) * sin(CoeffA / (c1 * Z2))); FP CoeffB = sqrt(1 + a1 * Z1 * Z1 + a2 * Z2 * Z2); anisotropyCoeff[axis0] = CoeffB / (CoeffA * q * sqrt(a1 * a2)) * asin(q * sin(CoeffA / c1) / CoeffB); anisotropyCoeff[axis1] = a1 * anisotropyCoeff[axis0]; anisotropyCoeff[axis2] = a2 * anisotropyCoeff[axis0]; } inline void FDTD::updateFields() { updateHalfB(); pml->updateB(); generator->generateB(); updateE(); pml->updateE(); generator->generateE(); updateHalfB(); globalTime += dt; } // Update grid values of magnetic field in FDTD. 
inline void FDTD::updateHalfB() { if (grid->dimensionality == 3) updateHalfB3D(); else if (grid->dimensionality == 2) updateHalfB2D(); else if (grid->dimensionality == 1) updateHalfB1D(); } inline void FDTD::updateHalfB3D() { updateBAreaBegin = Int3(1, 1, 1); updateBAreaEnd = grid->numCells - Int3(1, 1, 1); for (int d = 0; d < 3; ++d) { internalBAreaBegin[d] = std::max(updateBAreaBegin[d], pml->leftDims[d]); internalBAreaEnd[d] = std::min(updateBAreaEnd[d], grid->numCells[d] - pml->rightDims[d]); } const FP cdt = constants::c * dt * (FP)0.5; const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y); const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z); const FP coeffYX = cdt / (grid->steps.y * anisotropyCoeff.x); const FP coeffYZ = cdt / (grid->steps.y * anisotropyCoeff.z); const FP coeffZX = cdt / (grid->steps.z * anisotropyCoeff.x); const FP coeffZY = cdt / (grid->steps.z * anisotropyCoeff.y); // In central area use b(i, j, k) += c * dt * -rot(e(i, j, k)), which is: // b.x(i, j, k) += c * dt * ((e.y(i, j, k) - e.y(i, j, k-1)) / eps_z * dz - // (e.z(i, j, k) - e.z(i, j-1, k)) / eps_y * dy), // b.y(i, j, k) += c * dt * ((e.z(i, j, k) - e.z(i-1, j, k)) / eps_x * dx - // (e.x(i, j, k) - e.x(i, j, k-1)) / eps_z * dz), // b.z(i, j, k) += c * dt * ((e.x(i, j, k) - e.x(i, j-1, k)) / eps_y * dy - // (e.y(i, j, k) - e.y(i-1, j, k)) / eps_x * dx), const Int3 begin = internalBAreaBegin; const Int3 end = internalBAreaEnd; #pragma omp parallel for collapse(2) for (int i = begin.x; i < end.x; i++) for (int j = begin.y; j < end.y; j++) { #pragma simd for (int k = begin.z; k < end.z; k++) { grid->Bx(i, j, k) += coeffZX * (grid->Ey(i, j, k) - grid->Ey(i, j, k - 1)) - coeffYX * (grid->Ez(i, j, k) - grid->Ez(i, j - 1, k)); grid->By(i, j, k) += coeffXY * (grid->Ez(i, j, k) - grid->Ez(i - 1, j, k)) - coeffZY * (grid->Ex(i, j, k) - grid->Ex(i, j, k - 1)); grid->Bz(i, j, k) += coeffYZ * (grid->Ex(i, j, k) - grid->Ex(i, j - 1, k)) - coeffXZ * (grid->Ey(i, j, k) - grid->Ey(i - 
1, j, k)); } } } inline void FDTD::updateHalfB2D() { updateBAreaBegin = Int3(1, 1, 0); updateBAreaEnd = grid->numCells - Int3(1, 1, 0); for (int d = 0; d < 2; ++d) { internalBAreaBegin[d] = std::max(updateBAreaBegin[d], pml->leftDims[d]); internalBAreaEnd[d] = std::min(updateBAreaEnd[d], grid->numCells[d] - pml->rightDims[d]); } const FP cdt = constants::c * dt * (FP)0.5; const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y); const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z); const FP coeffYX = cdt / (grid->steps.y * anisotropyCoeff.x); const FP coeffYZ = cdt / (grid->steps.y * anisotropyCoeff.z); // In central area use b(i, j, k) += c * dt * -rot(e(i, j, k)), which is: // b.x(i, j, k) += c * dt * ((e.y(i, j, k) - e.y(i, j, k-1)) / eps_z * dz - // (e.z(i, j, k) - e.z(i, j-1, k)) / eps_y * dy), // b.y(i, j, k) += c * dt * ((e.z(i, j, k) - e.z(i-1, j, k)) / eps_x * dx - // (e.x(i, j, k) - e.x(i, j, k-1)) / eps_z * dz), // b.z(i, j, k) += c * dt * ((e.x(i, j, k) - e.x(i, j-1, k)) / eps_y * dy - // (e.y(i, j, k) - e.y(i-1, j, k)) / eps_x * dx), const Int3 begin = internalBAreaBegin; const Int3 end = internalBAreaEnd; #pragma omp parallel for for (int i = begin.x; i < end.x; i++) { #pragma simd for (int j = begin.y; j < end.y; j++) { grid->Bx(i, j, 0) += -coeffYX * (grid->Ez(i, j, 0) - grid->Ez(i, j - 1, 0)); grid->By(i, j, 0) += coeffXY * (grid->Ez(i, j, 0) - grid->Ez(i - 1, j, 0)); grid->Bz(i, j, 0) += coeffYZ * (grid->Ex(i, j, 0) - grid->Ex(i, j - 1, 0)) - coeffXZ * (grid->Ey(i, j, 0) - grid->Ey(i - 1, j, 0)); } } } inline void FDTD::updateHalfB1D() { updateBAreaBegin = Int3(1, 0, 0); updateBAreaEnd = grid->numCells - Int3(1, 0, 0); for (int d = 0; d < 1; ++d) { internalBAreaBegin[d] = std::max(updateBAreaBegin[d], pml->leftDims[d]); internalBAreaEnd[d] = std::min(updateBAreaEnd[d], grid->numCells[d] - pml->rightDims[d]); } const FP cdt = constants::c * dt * (FP)0.5; const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y); const FP coeffXZ = 
cdt / (grid->steps.x * anisotropyCoeff.z); // In central area use b(i, j, k) += c * dt * -rot(e(i, j, k)), which is: // b.x(i, j, k) += c * dt * ((e.y(i, j, k) - e.y(i, j, k-1)) / eps_z * dz - // (e.z(i, j, k) - e.z(i, j-1, k)) / eps_y * dy), // b.y(i, j, k) += c * dt * ((e.z(i, j, k) - e.z(i-1, j, k)) / eps_x * dx - // (e.x(i, j, k) - e.x(i, j, k-1)) / eps_z * dz), // b.z(i, j, k) += c * dt * ((e.x(i, j, k) - e.x(i, j-1, k)) / eps_y * dy - // (e.y(i, j, k) - e.y(i-1, j, k)) / eps_x * dx), const Int3 begin = internalBAreaBegin; const Int3 end = internalBAreaEnd; #pragma omp parallel for for (int i = begin.x; i < end.x; i++) { grid->By(i, 0, 0) += coeffXY * (grid->Ez(i, 0, 0) - grid->Ez(i - 1, 0, 0)); grid->Bz(i, 0, 0) += -coeffXZ * (grid->Ey(i, 0, 0) - grid->Ey(i - 1, 0, 0)); } } // Update grid values of electric field in FDTD. inline void FDTD::updateE() { if (grid->dimensionality == 3) updateE3D(); else if (grid->dimensionality == 2) updateE2D(); else if (grid->dimensionality == 1) updateE1D(); } inline void FDTD::updateE3D() { updateEAreaBegin = Int3(0, 0, 0); updateEAreaEnd = grid->numCells - Int3(1, 1, 1); for (int d = 0; d < 3; ++d) { internalEAreaBegin[d] = std::max(updateEAreaBegin[d], pml->leftDims[d]); internalEAreaEnd[d] = std::min(updateEAreaEnd[d], grid->numCells[d] - pml->rightDims[d]); } const FP coeffCurrent = -(FP)4 * constants::pi * dt; const FP cdt = constants::c * dt; const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y); const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z); const FP coeffYX = cdt / (grid->steps.y * anisotropyCoeff.x); const FP coeffYZ = cdt / (grid->steps.y * anisotropyCoeff.z); const FP coeffZX = cdt / (grid->steps.z * anisotropyCoeff.x); const FP coeffZY = cdt / (grid->steps.z * anisotropyCoeff.y); // In internal area use: // e.x(i, j, k) += dt * -4pi * j.x(i, j, k) + c * dt * ((b.z(i, j+1, k) - // b.z(i, j, k)) / eps_y * dy - (b.y(i, j, k+1) - b.y(i, j, k)) / eps_z * dz), // e.y(i, j, k) += dt * -4pi * j.y(i, 
j, k) + c * dt * ((b.x(i, j, k+1) - // b.x(i, j, k)) / eps_z * dz - (b.z(i+1, j, k) - b.z(i, j, k)) / eps_x * dx), // e.z(i, j, k) += dt * -4pi * j.z(i, j, k) + c * dt * ((b.y(i+1, j, k) - // b.y(i, j, k)) / eps_x * dx - (b.x(i, j+1, k) - b.x(i, j, k)) / eps_y * dy), const Int3 begin = internalEAreaBegin; const Int3 end = internalEAreaEnd; #pragma omp parallel for collapse(2) for (int i = begin.x; i < end.x; i++) for (int j = begin.y; j < end.y; j++) { #pragma simd for (int k = begin.z; k < end.z; k++) { grid->Ex(i, j, k) += coeffCurrent * grid->Jx(i, j, k) + coeffYX * (grid->Bz(i, j + 1, k) - grid->Bz(i, j, k)) - coeffZX * (grid->By(i, j, k + 1) - grid->By(i, j, k)); grid->Ey(i, j, k) += coeffCurrent * grid->Jy(i, j, k) + coeffZY * (grid->Bx(i, j, k + 1) - grid->Bx(i, j, k)) - coeffXY * (grid->Bz(i + 1, j, k) - grid->Bz(i, j, k)); grid->Ez(i, j, k) += coeffCurrent * grid->Jz(i, j, k) + coeffXZ * (grid->By(i + 1, j, k) - grid->By(i, j, k)) - coeffYZ * (grid->Bx(i, j + 1, k) - grid->Bx(i, j, k)); } } // Process edge values if (updateEAreaEnd.x == grid->numCells.x - 1) { int i = updateEAreaEnd.x; #pragma omp parallel for for (int j = begin.y; j < end.y; j++) for (int k = begin.z; k < end.z; k++) grid->Ex(i, j, k) += coeffCurrent * grid->Jx(i, j, k) + coeffYX * (grid->Bz(i, j + 1, k) - grid->Bz(i, j, k)) - coeffZX * (grid->By(i, j, k + 1) - grid->By(i, j, k)); } if (updateEAreaEnd.y == grid->numCells.y - 1) { int j = updateEAreaEnd.y; #pragma omp parallel for for (int i = begin.x; i < end.x; i++) for (int k = begin.z; k < end.z; k++) grid->Ey(i, j, k) += coeffCurrent * grid->Jy(i, j, k) + coeffZY * (grid->Bx(i, j, k + 1) - grid->Bx(i, j, k)) - coeffXY * (grid->Bz(i + 1, j, k) - grid->Bz(i, j, k)); } if (updateEAreaEnd.z == grid->numCells.z - 1) { int k = updateEAreaEnd.z; #pragma omp parallel for for (int i = begin.x; i < end.x; i++) for (int j = begin.y; j < end.y; j++) grid->Ez(i, j, k) += coeffCurrent * grid->Jz(i, j, k) + coeffXZ * (grid->By(i + 1, j, k) - 
grid->By(i, j, k)) - coeffYZ * (grid->Bx(i, j + 1, k) - grid->Bx(i, j, k)); } } inline void FDTD::updateE2D() { updateEAreaBegin = Int3(0, 0, 0); updateEAreaEnd = grid->numCells - Int3(1, 1, 0); for (int d = 0; d < 2; ++d) { internalEAreaBegin[d] = std::max(updateEAreaBegin[d], pml->leftDims[d]); internalEAreaEnd[d] = std::min(updateEAreaEnd[d], grid->numCells[d] - pml->rightDims[d]); } const FP coeffCurrent = -(FP)4 * constants::pi * dt; const FP cdt = constants::c * dt; const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y); const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z); const FP coeffYX = cdt / (grid->steps.y * anisotropyCoeff.x); const FP coeffYZ = cdt / (grid->steps.y * anisotropyCoeff.z); // In internal area use: // e.x(i, j, k) += dt * -4pi * j.x(i, j, k) + c * dt * ((b.z(i, j+1, k) - // b.z(i, j, k)) / eps_y * dy - (b.y(i, j, k+1) - b.y(i, j, k)) / eps_z * dz), // e.y(i, j, k) += dt * -4pi * j.y(i, j, k) + c * dt * ((b.x(i, j, k+1) - // b.x(i, j, k)) / eps_z * dz - (b.z(i+1, j, k) - b.z(i, j, k)) / eps_x * dx), // e.z(i, j, k) += dt * -4pi * j.z(i, j, k) + c * dt * ((b.y(i+1, j, k) - // b.y(i, j, k)) / eps_x * dx - (b.x(i, j+1, k) - b.x(i, j, k)) / eps_y * dy), const Int3 begin = internalEAreaBegin; const Int3 end = internalEAreaEnd; #pragma omp parallel for for (int i = begin.x; i < end.x; i++) { #pragma simd for (int j = begin.y; j < end.y; j++) { grid->Ex(i, j, 0) += coeffCurrent * grid->Jx(i, j, 0) + coeffYX * (grid->Bz(i, j + 1, 0) - grid->Bz(i, j, 0)); grid->Ey(i, j, 0) += coeffCurrent * grid->Jy(i, j, 0) - coeffXY * (grid->Bz(i + 1, j, 0) - grid->Bz(i, j, 0)); grid->Ez(i, j, 0) += coeffCurrent * grid->Jz(i, j, 0) + coeffXZ * (grid->By(i + 1, j, 0) - grid->By(i, j, 0)) - coeffYZ * (grid->Bx(i, j + 1, 0) - grid->Bx(i, j, 0)); } } // Process edge values if (updateEAreaEnd.x == grid->numCells.x - 1) { int i = updateEAreaEnd.x; #pragma omp parallel for for (int j = begin.y; j < end.y; j++) grid->Ex(i, j, 0) += coeffCurrent * 
grid->Jx(i, j, 0) + coeffYX * (grid->Bz(i, j + 1, 0) - grid->Bz(i, j, 0)); } if (updateEAreaEnd.y == grid->numCells.y - 1) { int j = updateEAreaEnd.y; #pragma omp parallel for for (int i = begin.x; i < end.x; i++) grid->Ey(i, j, 0) += coeffCurrent * grid->Jy(i, j, 0) - coeffXY * (grid->Bz(i + 1, j, 0) - grid->Bz(i, j, 0)); } } inline void FDTD::updateE1D() { updateEAreaBegin = Int3(0, 0, 0); updateEAreaEnd = grid->numCells - Int3(1, 0, 0); for (int d = 0; d < 1; ++d) { internalEAreaBegin[d] = std::max(updateEAreaBegin[d], pml->leftDims[d]); internalEAreaEnd[d] = std::min(updateEAreaEnd[d], grid->numCells[d] - pml->rightDims[d]); } const FP coeffCurrent = -(FP)4 * constants::pi * dt; const FP cdt = constants::c * dt; const FP coeffXY = cdt / (grid->steps.x * anisotropyCoeff.y); const FP coeffXZ = cdt / (grid->steps.x * anisotropyCoeff.z); // In internal area use: // e.x(i, j, k) += dt * -4pi * j.x(i, j, k) + c * dt * ((b.z(i, j+1, k) - // b.z(i, j, k)) / eps_y * dy - (b.y(i, j, k+1) - b.y(i, j, k)) / eps_z * dz), // e.y(i, j, k) += dt * -4pi * j.y(i, j, k) + c * dt * ((b.x(i, j, k+1) - // b.x(i, j, k)) / eps_z * dz - (b.z(i+1, j, k) - b.z(i, j, k)) / eps_x * dx), // e.z(i, j, k) += dt * -4pi * j.z(i, j, k) + c * dt * ((b.y(i+1, j, k) - // b.y(i, j, k)) / eps_x * dx - (b.x(i, j+1, k) - b.x(i, j, k)) / eps_y * dy), const Int3 begin = internalEAreaBegin; const Int3 end = internalEAreaEnd; #pragma omp parallel for for (int i = begin.x; i < end.x; i++) { grid->Ex(i, 0, 0) += coeffCurrent * grid->Jx(i, 0, 0); grid->Ey(i, 0, 0) += coeffCurrent * grid->Jy(i, 0, 0) - coeffXY * (grid->Bz(i + 1, 0, 0) - grid->Bz(i, 0, 0)); grid->Ez(i, 0, 0) += coeffCurrent * grid->Jz(i, 0, 0) + coeffXZ * (grid->By(i + 1, 0, 0) - grid->By(i, 0, 0)); } } }
omp_parallel_sections_firstprivate.c
<ompts:test> <ompts:testdescription>Test which checks the omp parallel sections firstprivate directive.</ompts:testdescription> <ompts:ompversion>2.0</ompts:ompversion> <ompts:directive>omp parallel sections firstprivate</ompts:directive> <ompts:dependences>omp critical</ompts:dependences> <ompts:testcode> #include <stdio.h> #include "omp_testsuite.h" int <ompts:testcode:functionname>omp_parallel_sections_firstprivate</ompts:testcode:functionname>(FILE * logFile){ int sum=7; int sum0=11; int known_sum; #pragma omp parallel sections <ompts:check>firstprivate(sum0)</ompts:check><ompts:crosscheck>private(sum0)</ompts:crosscheck> { #pragma omp section { #pragma omp critical { sum= sum+sum0; } /*end of critical */ } #pragma omp section { #pragma omp critical { sum= sum+sum0; } /*end of critical */ } #pragma omp section { #pragma omp critical { sum= sum+sum0; } /*end of critical */ } } /*end of parallel sections*/ known_sum=11*3+7; return (known_sum==sum); } /* end of check_section_firstprivate*/ </ompts:testcode> </ompts:test>
surf_term.c
#include "mex.h" #include "conv2d.h" #define DEBUG 0 void surf_term(int Nfp, int K, double *h, double *h_ext, double *u, double *v, double *nx, double *ny, double *eidM, double *eidP, signed char *eidtype, signed char *EToR, double *dflux) { int i, j; #ifdef _OPENMP #pragma omp parallel for private(j) num_threads(DG_THREADS) #endif for (i = 0; i < K; i++) { if ((cell_type)EToR[i] == REFINE) continue; int ind = i * Nfp; for (j = 0; j < Nfp; j++) { int iM = (int)eidM[ind] - 1; // change index to C type int iP = (int)eidP[ind] - 1; double f_M = h[iM]; // local and adjacent node values double hP = h[iP]; double uM = u[iM], vM = v[iM]; // outward normal vector of local element double nx_ = nx[ind]; double ny_ = ny[ind]; double f_ext; // external values on local nodes f_ext = h_ext[iM]; bc_type type = (bc_type)eidtype[ind]; // get adjacent values hP, qxP, qyP, considering // various boudnary conditions double f_P; int info = bound_cond(f_M, hP, f_ext, nx_, ny_, type, &f_P); // if(info) mexErrMsgTxt("Unknown boundary conditions."); double numflux, E, G; upwind_flux(f_M, f_P, uM, vM, nx_, ny_, &numflux); nodal_flux(f_M, uM, vM, &E, &G); #if DEBUG mexPrintf("n = %d, k = %d, num_flux = %e, E = %f, G = %f\n", j, i, numflux, E, G); #endif dflux[ind] = -numflux + nx_ * E + ny_ * G; ind++; } } return; }
tools.c
#include "tools.h" #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> gsl_rng **_random = NULL; double tool_get_time() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1e-6; } double tool_cut(double x) { if (x <= 0.0) return 0.0; if (x >= 1.0) return 1.0; return x; } void print_array(double c[], int l) { for (int i = 0; i < l - 1; i++) { printf("%.3f ", c[i]); } printf("%.3f\n", c[l - 1]); } void print_array_b(bool c[], int l) { for (int i = 0; i < l - 1; i++) { printf("%d,", c[i] ? 1 : 0); } printf("%d\n", c[l - 1] ? 1 : 0); } void print_array_i(int c[], int l) { for (int i = 0; i < l - 1; i++) { printf("%d,", c[i]); } printf("%d\n", c[l - 1]); } void print_array_ui(unsigned int c[], int l) { for (int i = 0; i < l - 1; i++) { printf("%u,", c[i]); } printf("%u\n", c[l - 1]); } void print_array_2d(bool **c, int xlen, int ylen) { for (int x = 0; x < xlen; x++) { for (int y = 0; y < ylen; y++) { printf("%d,", c[x][y]); } puts(""); } } void print_array_2d_d(double **c, int xlen, int ylen) { for (int x = 0; x < xlen; x++) { for (int y = 0; y < ylen; y++) { printf("%.2f,", c[x][y]); } puts(""); } } void tool_free_array_d(double **a, int xlen) { for (int x = 0; x < xlen; x++) { free(a[x]); } free(a); } void tool_free_array_b(bool **a, int xlen) { for (int x = 0; x < xlen; x++) { free(a[x]); } free(a); } void tool_free_array_ui(unsigned int **a, int xlen) { for (int x = 0; x < xlen; x++) { free(a[x]); } free(a); } bool **tool_alloc_array_b(int xlen, int ylen) { bool **a = (bool **)malloc(xlen * sizeof(bool *)); for (int x = 0; x < xlen; x++) { a[x] = (bool *)calloc((size_t)ylen, sizeof(bool)); } return a; } unsigned int **tool_alloc_array_ui(int xlen, int ylen) { unsigned int **a = (unsigned int **)malloc(xlen * sizeof(unsigned int *)); for (int x = 0; x < xlen; x++) { a[x] = (unsigned int *)calloc((size_t)ylen, sizeof(unsigned int)); } return a; } double **tool_alloc_array_d(int xlen, int ylen) { double **a = (double 
**)malloc(xlen * sizeof(double *)); for (int x = 0; x < xlen; x++) { a[x] = (double *)calloc((size_t)ylen, sizeof(double)); } return a; } void tool_array_bool_to_double(double *res, bool *src, int len) { for (int i = 0; i < len; i++) { res[i] = src[i] ? 1.0 : 0.0; } } void tool_round(double **a, int xlen, int ylen) { for (int x = 0; x < xlen; x++) for (int y = 0; y < ylen; y++) a[x][y] = round(a[x][y]); } static int tool_current_thread() { #ifdef _USE_OPENMP return omp_get_thread_num(); #else return 0; #endif } double tool_rand() { return gsl_rng_uniform(_random[tool_current_thread()]); } /* * Returns random int from [min, max-1] */ int tool_rand_i(int min, int max) { return min + gsl_rng_uniform_int(_random[tool_current_thread()], max - min); } void tool_fill_random1(double *a, int xlen, bool deterministic) { for (int x = 0; x < xlen; x++) { a[x] = tool_rand(); if (deterministic) { a[x] = a[x] > 0.5 ? 1 : 0; } } } void tool_fill_random1ui(unsigned int *a, int xlen, int min, int max) { for (int x = 0; x < xlen; x++) { a[x] = (unsigned int)tool_rand_i(min, max); } } void tool_fill_random1i(int *a, int xlen, int min, int max) { for (int x = 0; x < xlen; x++) { a[x] = tool_rand_i(min, max); } } void tool_fill_random1b(bool *a, int xlen) { for (int x = 0; x < xlen; x++) { a[x] = tool_rand() < 0.5; } } void tool_fill_random(double **a, int xlen, int ylen, bool deterministic) { for (int x = 0; x < xlen; x++) { for (int y = 0; y < ylen; y++) { a[x][y] = tool_rand(); if (deterministic) { a[x][y] = a[x][y] > 0.5 ? 1 : 0; } } } } void tool_fill_random_d(double **a, int xlen, int ylen) { for (int x = 0; x < xlen; x++) { for (int y = 0; y < ylen; y++) { a[x][y] = tool_rand(); // < 0.5 ? 
1 : 0; } } } void tool_fill_random_b(bool **a, int xlen, int ylen) { for (int x = 0; x < xlen; x++) { double p = tool_rand(); for (int y = 0; y < ylen; y++) { a[x][y] = tool_rand() <= p; } } } void tool_fill_random_ui(unsigned int **a, int xlen, int ylen, int min, int max) { for (int x = 0; x < xlen; x++) { for (int y = 0; y < ylen; y++) { a[x][y] = (unsigned int)tool_rand_i(min, max); } } } double tool_sum(double **a, int xlen, int ylen) { double res = 0; for (int x = 0; x < xlen; x++) for (int y = 0; y < ylen; y++) res += a[x][y]; return res; } int tool_foldedSize(int s, int r) { return s + 2 * r; } void tool_fold(bool *f_res, bool *u_src, int u_len, int r) { memcpy(f_res + r, u_src, (size_t)u_len * sizeof(bool)); memcpy(f_res, u_src + u_len - r, (size_t)r * sizeof(bool)); memcpy(f_res + r + u_len, u_src, (size_t)r * sizeof(bool)); } void tool_fold_d(double *f_res, double *u_src, int u_len, int r) { memcpy(f_res + r, u_src, (size_t)u_len * sizeof(double)); memcpy(f_res, u_src + u_len - r, (size_t)r * sizeof(double)); memcpy(f_res + r + u_len, u_src, (size_t)r * sizeof(double)); } void tool_unfold(bool u_res[], bool f_src[], int f_len, int r) { memcpy(u_res, f_src + r, (size_t)(f_len - 2 * r) * sizeof(bool)); } void tool_unfold_d(double u_res[], double f_src[], int f_len, int r) { memcpy(u_res, f_src + r, (size_t)(f_len - 2 * r) * sizeof(double)); } void tool_add(double **res, bool **src, int xlen, int ylen) { for (int x = 0; x < xlen; x++) for (int y = 0; y < ylen; y++) res[x][y] += src[x][y] ? 1.0 : 0.0; } void tool_mult(double **a, double c, int xlen, int ylen) { for (int x = 0; x < xlen; x++) for (int y = 0; y < ylen; y++) a[x][y] *= c; } void tool_abs_subst(double **res, bool **a, double **b, int xlen, int ylen) { for (int x = 0; x < xlen; x++) for (int y = 0; y < ylen; y++) { res[x][y] = fabs((a[x][y] ? 
1.0 : 0.0) - b[x][y]); } } double tool_1d_max(double *a, int len) { double max = 0.0; for (int i = 0; i < len; i++) { if (a[i] > max) max = a[i]; } return max; } int tool_differences(bool **a, bool **b, int xlen, int ylen) { int result = 0; for (int x = 0; x < xlen; x++) { for (int y = 0; y < ylen; y++) { if (a[x][y] && !b[x][y]) result++; if (!a[x][y] && b[x][y]) result++; } } return result; } static int tool_num_threads() { #ifdef _USE_OPENMP return omp_get_num_threads(); #else return 1; #endif } void tool_fake_random() { int threads = 0; #ifdef _USE_OPENMP #pragma omp parallel #pragma omp master #endif { threads = tool_num_threads(); const gsl_rng_type *T; gsl_rng_env_setup(); T = gsl_rng_default; _random = (gsl_rng **)malloc(threads * sizeof(gsl_rng *)); for (int i = 0; i < threads; i++) { _random[i] = gsl_rng_alloc(T); gsl_rng_set(_random[i], 1234567890); } } } void tool_init_random() { int threads = 0; #ifdef _USE_OPENMP #pragma omp parallel #pragma omp master #endif { if (_random == NULL) { threads = tool_num_threads(); #ifndef _USE_RAND #ifdef __APPLE__ srandomdev(); #else srandom((unsigned int)tool_get_time()); #endif #else srand((unsigned int)tool_get_time()); #endif const gsl_rng_type *T; gsl_rng_env_setup(); T = gsl_rng_default; _random = (gsl_rng **)malloc(threads * sizeof(gsl_rng *)); for (int i = 0; i < threads; i++) { _random[i] = gsl_rng_alloc(T); unsigned long int seed; #ifdef _USE_RAND seed = (unsigned long int)rand(); #else seed = (unsigned long int)random(); #endif gsl_rng_set(_random[i], seed); } } } } double tool_rand_gauss(double sigma) { return gsl_ran_gaussian_ziggurat(_random[tool_current_thread()], sigma); } int tool_cap(int a, int min, int max) { if (a > max) return max; if (a < min) return min; return a; } void tool_range(int *result, int start, int len) { for (int i = start; i < start + len; i++) { result[i - start] = i; } } int *tool_alloc_range(int start, int len) { int *a = (int *)malloc(sizeof(int) * len); tool_range(a, start, 
len); return a; } #ifdef __APPLE__ int _compr(void *f, const void *a, const void *b) { #else int _compr(const void *a, const void *b, void *f) { #endif double *fitness = (double *)f; int i1 = (int)(*(int *)a); int i2 = (int)(*(int *)b); double diff = fitness[i1] - fitness[i2]; if (diff == 0.0) return 0; if (diff > 0.0) return 1; return -1; } int *tool_sort_fitness_index(int *index, double *fitness, int count) { #ifdef __APPLE__ qsort_r(index, count, sizeof(int), fitness, _compr); #else qsort_r(index, count, sizeof(int), _compr, fitness); #endif return index; } #ifdef __APPLE__ int _compr_int(void *f, const void *a, const void *b) { #else int _compr_int(const void *a, const void *b, void *f) { #endif int *fitness = (int *)f; int i1 = (int)(*(int *)a); int i2 = (int)(*(int *)b); int diff = fitness[i1] - fitness[i2]; if (diff == 0) return 0; if (diff > 0) return 1; return -1; } int *tool_sort_index_int(int *index, int *array, int count) { #ifdef __APPLE__ qsort_r(index, count, sizeof(int), array, _compr_int); #else qsort_r(index, count, sizeof(int), _compr_int, array); #endif return index; } bool tool_row_cmp(bool *r1, bool *r2, int len) { for (int i = 0; i < len; i++) { if (r1[i] != r2[i]) return false; } return true; } double tool_avg(double *a, int len) { double result = 0; for (int i = 0; i < len; i++) result += a[i]; return result / len; } double tool_max(double *a, int len) { double result = a[0]; for (int i = 1; i < len; i++) { if (result < a[i]) result = a[i]; } return result; } int tool_max_i(int *a, int len) { int result = a[0]; for (int i = 1; i < len; i++) { if (result < a[i]) result = a[i]; } return result; } double tool_min(double *a, int len) { double result = a[0]; for (int i = 1; i < len; i++) { if (result > a[i]) result = a[i]; } return result; } int tool_min_i(int *a, int len) { int result = a[0]; for (int i = 1; i < len; i++) { if (result > a[i]) result = a[i]; } return result; } double tool_max_ix(double *a, int len, int *id) { double result = 
a[0]; (*id) = 0; for (int i = 1; i < len; i++) { if (result < a[i]) { result = a[i]; (*id) = i; } } return result; } double tool_min_ix(double *a, int len, int *id) { double result = a[0]; (*id) = 0; for (int i = 1; i < len; i++) { if (result > a[i]) { result = a[i]; (*id) = i; } } return result; } double tool_1d_sum(double *a, int len) { double result = 0; for (int i = 0; i < len; i++) { result += a[i]; } return result; } bool tool_compar_b(bool *a, bool *b, int len) { for (int i = 0; i < len; i++) { if (a[i] != b[i]) return false; } return true; } bool tool_compar_d(double *a, double *b, int len) { for (int i = 0; i < len; i++) { if (a[i] != b[i]) return false; } return true; }
pooling_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 2x2 max pooling with stride 2, one output per 2x2 input window.
// Processes each channel independently; within a channel, r0/r1 walk the
// two input rows that feed one output row.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After producing one output row we consumed 2*outw input columns of the
    // current row; tailstep skips the remainder of that row plus one whole
    // extra row (stride 2 vertically).
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // Two adjacent input rows feeding the current output row.
        const float* r0 = img0;
        const float* r1 = img0 + w;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            // nn: number of 4-output vector iterations; remain: scalar tail.
            int nn = outw >> 2;
            int remain = outw - (nn << 2);
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            for (; nn>0; nn--)
            {
                // Load 8 columns from each of the two rows (4 output windows).
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r01 = vld1q_f32(r0 + 4);
                float32x4_t _r11 = vld1q_f32(r1 + 4);

                // Vertical max of the two rows...
                float32x4_t _max0 = vmaxq_f32(_r00, _r10);
                float32x4_t _max1 = vmaxq_f32(_r01, _r11);

                // ...then pairwise horizontal max collapses each 2-wide window.
                float32x4_t _max = vpmaxq_f32(_max0, _max1);

                vst1q_f32(outptr, _max);

                r0 += 8;
                r1 += 8;
                outptr += 4;
            }
#else
            if (nn > 0)
            {
                // ARMv7 inline-asm version of the loop above:
                // %0 = nn (counter), %1 = r0, %2 = r1, %3 = outptr;
                // vmax does the vertical max, vpmax the horizontal pairwise max.
                asm volatile(
                    "0: \n"
                    "pld [%1, #256] \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d0-d3}, [%1]! \n"
                    "vld1.f32 {d4-d7}, [%2]! \n"
                    "vmax.f32 q0, q0, q2 \n"
                    "vmax.f32 q1, q1, q3 \n"
                    "vpmax.f32 d4, d0, d1 \n"
                    "vpmax.f32 d5, d2, d3 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d4-d5}, [%3]! \n"
                    "bne 0b \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr)
                    : "cc", "memory", "q0", "q1", "q2", "q3"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: one 2x2 window at a time.
            for (; remain>0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);

                *outptr = std::max(max0, max1);

                r0 += 2;
                r1 += 2;
                outptr++;
            }

            // Advance both row pointers to the next pair of input rows.
            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
convolutiondepthwise_5x5_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 5x5 convolution, stride 1, pack4 layout (4 channels interleaved
// per element, so every pointer step of 4 floats is one spatial position).
// Computes two output rows per iteration of the outer loop so that each
// loaded input row (rows 1..4 of the window) is reused for both outputs.
static void convdw5x5s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // Per-group bias vector (4 lanes), or zero when no bias is supplied.
        v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);
        float* outptr1 = out.row(1);

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);
        const float* r5 = img0.row(5);

        int i = 0;
        // Main case: produce two output rows at a time from six input rows.
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 16);
                __builtin_prefetch(r1 + 16);
                __builtin_prefetch(r2 + 16);
                __builtin_prefetch(r3 + 16);
                __builtin_prefetch(r4 + 16);
                __builtin_prefetch(r5 + 16);

                __builtin_prefetch(k0 + 400);

                v4f32 _sum0 = _bias0;
                v4f32 _sum1 = _bias0;

                // kernel row 0 contributes to _sum0 (output row i) only
                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                // input row 1 is kernel row 1 for _sum0 and kernel row 0 for _sum1
                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k00, _r10);
                _sum1 = __msa_fmadd_w(_sum1, _k01, _r11);
                _sum1 = __msa_fmadd_w(_sum1, _k02, _r12);
                _sum1 = __msa_fmadd_w(_sum1, _k03, _r13);
                _sum1 = __msa_fmadd_w(_sum1, _k04, _r14);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k10, _r20);
                _sum1 = __msa_fmadd_w(_sum1, _k11, _r21);
                _sum1 = __msa_fmadd_w(_sum1, _k12, _r22);
                _sum1 = __msa_fmadd_w(_sum1, _k13, _r23);
                _sum1 = __msa_fmadd_w(_sum1, _k14, _r24);

                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k20, _r30);
                _sum1 = __msa_fmadd_w(_sum1, _k21, _r31);
                _sum1 = __msa_fmadd_w(_sum1, _k22, _r32);
                _sum1 = __msa_fmadd_w(_sum1, _k23, _r33);
                _sum1 = __msa_fmadd_w(_sum1, _k24, _r34);

                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k30, _r40);
                _sum1 = __msa_fmadd_w(_sum1, _k31, _r41);
                _sum1 = __msa_fmadd_w(_sum1, _k32, _r42);
                _sum1 = __msa_fmadd_w(_sum1, _k33, _r43);
                _sum1 = __msa_fmadd_w(_sum1, _k34, _r44);

                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                // rewind kernel pointer to the start of the 5x5 filter
                k0 -= 4 * 20;

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                v4f32 _r50 = (v4f32)__msa_ld_w(r5, 0);
                v4f32 _r51 = (v4f32)__msa_ld_w(r5 + 4, 0);
                v4f32 _r52 = (v4f32)__msa_ld_w(r5 + 4 * 2, 0);
                v4f32 _r53 = (v4f32)__msa_ld_w(r5 + 4 * 3, 0);
                v4f32 _r54 = (v4f32)__msa_ld_w(r5 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k40, _r50);
                _sum1 = __msa_fmadd_w(_sum1, _k41, _r51);
                _sum1 = __msa_fmadd_w(_sum1, _k42, _r52);
                _sum1 = __msa_fmadd_w(_sum1, _k43, _r53);
                _sum1 = __msa_fmadd_w(_sum1, _k44, _r54);

                __msa_st_w((v4i32)_sum0, outptr0, 0);
                __msa_st_w((v4i32)_sum1, outptr1, 0);

                outptr0 += 4;
                outptr1 += 4;

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
                r5 += 4;
            }

            // Skip the 4-column right border plus one whole row: with stride 1
            // and two output rows per pass, the window moves down two rows.
            r0 += 4 * 4 + w * 4;
            r1 += 4 * 4 + w * 4;
            r2 += 4 * 4 + w * 4;
            r3 += 4 * 4 + w * 4;
            r4 += 4 * 4 + w * 4;
            r5 += 4 * 4 + w * 4;

            outptr0 += outw * 4;
            outptr1 += outw * 4;
        }
        // Remainder: odd last output row, one row at a time.
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 16);
                __builtin_prefetch(r1 + 16);
                __builtin_prefetch(r2 + 16);
                __builtin_prefetch(r3 + 16);
                __builtin_prefetch(r4 + 16);

                __builtin_prefetch(k0 + 400);

                v4f32 _sum0 = _bias0;

                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                // rewind kernel pointer to the start of the 5x5 filter
                k0 -= 4 * 20;

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                __msa_st_w((v4i32)_sum0, outptr0, 0);

                outptr0 += 4;

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }

            // Skip the 4-column right border of the 5-wide window.
            r0 += 4 * 4;
            r1 += 4 * 4;
            r2 += 4 * 4;
            r3 += 4 * 4;
            r4 += 4 * 4;
        }
    }
}

// Depthwise 5x5 convolution, stride 2, pack4 layout.  Same accumulation
// pattern as the stride-1 single-row path above; input pointers advance by
// two positions (8 floats) per output column.
static void convdw5x5s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // Bytes-in-floats to jump from the end of one consumed input row to the
    // start of the next window row (stride 2 vertically), in pack4 units.
    const int tailstep = (w - 2 * outw + w) * 4;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // Per-group bias vector (4 lanes), or zero when no bias is supplied.
        v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);

        const float* k0 = kernel.row(g);

        float* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                __builtin_prefetch(r0 + 32);
                __builtin_prefetch(r1 + 32);
                __builtin_prefetch(r2 + 32);
                __builtin_prefetch(r3 + 32);
                __builtin_prefetch(r4 + 32);

                __builtin_prefetch(k0 + 400);

                v4f32 _sum0 = _bias0;

                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                // rewind kernel pointer to the start of the 5x5 filter
                k0 -= 4 * 20;

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                __msa_st_w((v4i32)_sum0, outptr0, 0);

                outptr0 += 4;

                // stride 2: advance two pack4 positions per output column
                r0 += 4 * 2;
                r1 += 4 * 2;
                r2 += 4 * 2;
                r3 += 4 * 2;
                r4 += 4 * 2;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
gemm_blis.c
/**
 * This file is part of convGemm
 *
 * Copyright (C) 2021-22 Universitat Politècnica de València and
 * Universitat Jaume I
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "gemm_blis.h"

/* Shared BLIS state, filled in lazily by gemm_blis_init(). */
cntx_t *blis_cntx = NULL;
sgemm_ukr_ft blis_gemm_kernel = NULL;
int blis_abi_version = BLIS_ABI_VERSION;

/*
 * Initializes BLIS, blis_cntx and blis_gemm_kernel.  Idempotent, but the
 * lazy check is not thread-safe: call it once before spawning threads that
 * use the kernel.
 */
void gemm_blis_init() {
    if (blis_cntx == NULL) {
        bli_init();
        blis_cntx = bli_gks_query_cntx();
        /* Native single-precision GEMM micro-kernel for this architecture. */
        blis_gemm_kernel = bli_cntx_get_l3_nat_ukr_dt(BLIS_FLOAT, (l3ukr_t) BLIS_GEMM, blis_cntx);
    }
}

/*
 * BLIS row-panel pack, M --> Mc.
 *
 * Copies an mc x nc sub-block of M (selected by start_row/start_col, with
 * orderM = 'C'olumn/'R'ow major and transM = 'N'one/'T'ranspose) into the
 * packed buffer Mc as row panels of height RR, zero-padding the last panel
 * when mc is not a multiple of RR.  conv_params is unused here; it is part
 * of the common packing-function signature (see gemm_blis.h).
 */
void pack_RB(char orderM, char transM, int mc, int nc, const float *restrict M, int ldM, float *restrict Mc, int RR, const conv_p *conv_params, int start_row, int start_col) {
    int i, j, ii, k, rr;

    /* Rebase M at the requested sub-block origin.  Mcol/Mrow (from
       gemm_blis.h) index M with leading dimension ldM. */
    if ((transM == 'N') && (orderM == 'C'))
        M = &Mcol(start_row, start_col);
    else if ((transM == 'N') && (orderM == 'R'))
        M = &Mrow(start_row, start_col);
    else if ((transM == 'T') && (orderM == 'C'))
        M = &Mcol(start_col, start_row);
    else
        M = &Mrow(start_col, start_row);

    /* The two branches differ only in how the (panel-row, column) pair maps
       onto the storage: logically non-transposed vs. transposed access. */
    if (((transM == 'N') && (orderM == 'C')) || ((transM == 'T') && (orderM == 'R')))
#pragma omp parallel for private(i, j, ii, rr, k)
        for (i = 0; i < mc; i += RR) {
            k = i * nc;                 /* each RR-panel owns nc*RR slots */
            rr = min(mc - i, RR);       /* rows in this (possibly short) panel */
            for (j = 0; j < nc; j++) {
                for (ii = 0; ii < rr; ii++) {
                    Mc[k] = Mcol(i + ii, j);
                    k++;
                }
                for (ii = rr; ii < RR; ii++) {  /* zero-pad short panel */
                    Mc[k] = (float) 0.0;
                    k++;
                }
            }
        }
    else
#pragma omp parallel for private(i, j, ii, rr, k)
        for (i = 0; i < mc; i += RR) {
            k = i * nc;
            rr = min(mc - i, RR);
            for (j = 0; j < nc; j++) {
                for (ii = 0; ii < rr; ii++) {
                    Mc[k] = Mcol(j, i + ii);
                    k++;
                }
                for (ii = rr; ii < RR; ii++) {
                    Mc[k] = (float) 0.0;
                    k++;
                }
            }
        }
}

/*
 * BLIS column-panel pack, M --> Mc.
 *
 * Same contract as pack_RB, but packs column panels of width RR
 * (zero-padded to RR when nc is not a multiple of RR).
 */
void pack_CB(char orderM, char transM, int mc, int nc, const float *restrict M, int ldM, float *restrict Mc, int RR, const conv_p *conv_params, int start_row, int start_col) {
    int i, j, jj, k, nr;

    /* Rebase M at the requested sub-block origin. */
    if ((transM == 'N') && (orderM == 'C'))
        M = &Mcol(start_row, start_col);
    else if ((transM == 'N') && (orderM == 'R'))
        M = &Mrow(start_row, start_col);
    else if ((transM == 'T') && (orderM == 'C'))
        M = &Mcol(start_col, start_row);
    else
        M = &Mrow(start_col, start_row);

    /* (Former dead store `k = 0;` removed: k is OpenMP-private and is
       re-derived from j inside every iteration.) */
    if (((transM == 'N') && (orderM == 'C')) || ((transM == 'T') && (orderM == 'R')))
#pragma omp parallel for private(i, j, jj, nr, k)
        for (j = 0; j < nc; j += RR) {
            k = j * mc;                 /* each RR-panel owns mc*RR slots */
            nr = min(nc - j, RR);       /* columns in this panel */
            for (i = 0; i < mc; i++) {
                for (jj = 0; jj < nr; jj++) {
                    Mc[k] = Mcol(i, j + jj);
                    k++;
                }
                for (jj = nr; jj < RR; jj++) {  /* zero-pad short panel */
                    Mc[k] = (float) 0.0;
                    k++;
                }
            }
        }
    else
#pragma omp parallel for private(i, j, jj, nr, k)
        for (j = 0; j < nc; j += RR) {
            k = j * mc;
            nr = min(nc - j, RR);
            for (i = 0; i < mc; i++) {
                for (jj = 0; jj < nr; jj++) {
                    Mc[k] = Mcol(j + jj, i);
                    k++;
                }
                for (jj = nr; jj < RR; jj++) {
                    Mc[k] = (float) 0.0;
                    k++;
                }
            }
        }
}

/*
 * sxpbyM: Y := X + beta * Y over an m x n column-major block
 * (leading dimensions ldx, ldy).  The beta == 0 and beta == 1 cases are
 * specialized to skip the multiply.
 */
void sxpbyM(int m, int n, const float *restrict X, int ldx, float beta, float *restrict Y, int ldy) {
    if (beta == 0.0) {
        for (int j = 0; j < n; j++)
            for (int i = 0; i < m; i++)
                Y[j * ldy + i] = X[j * ldx + i];
    } else if (beta == 1.0) {
        for (int j = 0; j < n; j++)
            for (int i = 0; i < m; i++)
                Y[j * ldy + i] += X[j * ldx + i];
    } else {
        for (int j = 0; j < n; j++)
            for (int i = 0; i < m; i++)
                Y[j * ldy + i] = beta * Y[j * ldy + i] + X[j * ldx + i];
    }
}
GB_binop__isgt_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_01__isgt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_03__isgt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_uint16)
// A*D function (colscale):         GB (_AxD__isgt_uint16)
// D*A function (rowscale):         GB (_DxB__isgt_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_uint16)
// C=scalar+B                       GB (_bind1st__isgt_uint16)
// C=scalar+B'                      GB (_bind1st_tran__isgt_uint16)
// C=A+scalar                       GB (_bind2nd__isgt_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij > bij)

// The macros below are consumed by the included *_template.c /_meta.c files;
// they specialize the generic kernels for the ISGT operator on uint16.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: ISGT returns 1 or 0 in the C type (uint16_t), not bool
#define GB_BINOP(z, x, y, i, j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_UINT16 || GxB_NO_ISGT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGT is none of these, so the dense-accum ewise3 kernel is compiled out
// (the enclosing "#if 0" opens just above this function).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable duplicate return emitted by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isgt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB: skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB: skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    uint16_t aij = Ax [pA] ;   \
    Cx [pC] = (x > aij) ;      \
}

GrB_Info GB (_bind1st_tran__isgt_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    uint16_t aij = Ax [pA] ;   \
    Cx [pC] = (aij > y) ;      \
}

GrB_Info GB (_bind2nd_tran__isgt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bget_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): review comments only below; no code tokens were changed.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__bget_int16
// A.*B function (eWiseMult): GB_AemultB__bget_int16
// A*D function (colscale): (none)
// D*A function (rowscale): (node)
// NOTE(review): "(node)" above is a generator typo for "(none)"; it also
// appears as the function name inside the disabled (#if 0) rowscale stub.
// C+=B function (dense accum): GB_Cdense_accumB__bget_int16
// C+=b function (dense accum): GB_Cdense_accumb__bget_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int16
// C=scalar+B GB_bind1st__bget_int16
// C=scalar+B' GB_bind1st_tran__bget_int16
// C=A+scalar GB_bind2nd__bget_int16
// C=A'+scalar GB_bind2nd_tran__bget_int16

// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_BITGET (aij, bij, int16_t, 16)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_BITGET (x, y, int16_t, 16) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_INT16 || GxB_NO_BGET_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bget_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bget_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bget_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this second return is unreachable (the inner block above
    // always returns); harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (node)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__bget_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__bget_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__bget_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t bij = Bx [p] ;
        Cx [p] = GB_BITGET (x, bij, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__bget_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        Cx [p] = GB_BITGET (aij, y, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = GB_BITGET (x, aij, int16_t, 16) ; \
}

GrB_Info GB_bind1st_tran__bget_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    // NOTE(review): redefining GB_PHASE_2_OF_2 with an identical empty body
    // is legal C (it is already defined at file scope above); benign.
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = GB_BITGET (aij, y, int16_t, 16) ; \
}

GrB_Info GB_bind2nd_tran__bget_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
depend-3.c
/* OpenMP task-dependence test (per the OpenMP spec, depend clauses order
   tasks generated by the same task region; taskgroup is not task-generating,
   so every task below is a sibling child of the task executing the single
   region).  The depend(out: x) task at the bottom must wait for all earlier
   depend(in: x) readers; the usleep calls slow the readers down so a runtime
   that ignored the dependence would let x = 2 overtake them and abort. */
#include <stdlib.h>
#include <unistd.h>

int
main ()
{
#pragma omp parallel
#pragma omp single
  {
    int x = 1, y = 2;
#pragma omp taskgroup
    {
      /* Reader: must still observe the initial x == 1. */
#pragma omp task shared (x) depend(in: x)
      {
	usleep (10000);
	if (x != 1)
	  abort ();
      }
      /* Nested taskgroups add only end-of-region waits; they do not change
	 the sibling relationship of the tasks created inside them. */
#pragma omp taskgroup
      {
#pragma omp task shared (x) depend(in: x)
	{
	  usleep (15000);
	  if (x != 1)
	    abort ();
	}
	/* Independent dependence chain on y: must not serialize against
	   the x tasks. */
#pragma omp task shared (y) depend(inout: y)
	{
	  if (y != 2)
	    abort ();
	  y = 3;
	}
#pragma omp taskgroup
	{
#pragma omp task shared (x) depend(in: x)
	  {
	    usleep (13000);
	    if (x != 1)
	      abort ();
	  }
#pragma omp taskgroup
	  {
	    /* Writer: depends on every earlier depend(in: x) sibling. */
#pragma omp task shared (x) depend(out: x)
	    x = 2;
	  }
	}
      }
    }
  }
  return 0;
}
for_schedule_static.c
/*
 * For static scheduling we check whether the chunks have the requested size,
 * with the legal exception of the last chunk.
 * Modified by Chunhua Liao
 */
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"

#define CFSMAX_SIZE 1000

/****************************************************************/
/* Positive test: records which thread executes each iteration of a
 * schedule(static,chunk_size) loop, then verifies the expected round-robin
 * chunk assignment (iteration i belongs to thread (i/chunk_size) % threads).
 * Returns 1 on success, 0 on failure or when fewer than 2 threads exist. */
int
check_for_schedule_static (FILE * logFile)
{
  const int chunk_size = 7;
  int threads;			/* written once inside the single region */
  int tids[CFSMAX_SIZE];	/* tids[i] = thread that ran iteration i */
  int i;
  int result = 0;		/* number of misassigned iterations */
  int ii = 0;
  int tid;

#pragma omp parallel
  {				/* begin of parallel */
#pragma omp single
    {
      threads = omp_get_num_threads ();
    }
  }				/* end of parallel */

  if (threads < 2)
    {
      printf ("This test only works with at least two threads");
      fprintf (logFile, "This test only works with at least two threads");
      return 0;
    }
  else
    {
      fprintf (logFile,
	       "Using an internal count of %d\nUsing a specified chunksize of %d\n",
	       CFSMAX_SIZE, chunk_size);
#pragma omp parallel shared(tids) private(tid)
      {				/* begin of parallel */
	tid = omp_get_thread_num ();
	/* the worksharing loop's iteration variable i is privatized */
#pragma omp for schedule(static,chunk_size)
	for (i = 0; i < CFSMAX_SIZE; i++)
	  {
	    tids[i] = tid;
	  }
      }				/* end of parallel */
      /*
         printf("debug---------------------\n");
         for (i=0;i<CFSMAX_SIZE -1;i++) printf("%d ",tids[i]);
         printf("End of debug---------------------\n");
       */
      for (i = 0; i < CFSMAX_SIZE; ++i)
	{
	  ii = (i / chunk_size) % threads;	/*round-robin for static chunk */
	  if (tids[i] != ii)
	    {
	      result++;
	      fprintf (logFile,
		       "Iteration %d should be assigned to %d instead of %d\n",
		       i, ii, tids[i]);
	    }
	}
      /*printf("Alles OK beim Test von schedule(static)\n"); */
      return (result == 0);
    }
}

/****************************************************************/
/* Cross-check variant: identical to the check above except the loop has NO
 * schedule clause — presumably the deliberate negative test of the
 * validation suite, expected to mismatch the round-robin pattern so the
 * harness can confirm the positive check is meaningful (TODO: confirm
 * against the suite driver). */
int
crosscheck_for_schedule_static (FILE * logFile)
{
  const int chunk_size = 7;
  int threads;
  int tids[CFSMAX_SIZE];
  int i;
  int result = 0;
  int ii = 0;
  int tid;

#pragma omp parallel
  {				/* begin of parallel */
#pragma omp single
    {
      threads = omp_get_num_threads ();
    }
  }				/* end of parallel */

  if (threads < 2)
    {
      printf ("This test only works with at least two threads");
      fprintf (logFile, "This test only works with at least two threads");
      return 0;
    }
  else
    {
      fprintf (logFile,
	       "Using an internal count of %d\nUsing a specified chunksize of %d\n",
	       CFSMAX_SIZE, chunk_size);
#pragma omp parallel shared(tids) private(tid)
      {				/* begin of parallel */
	tid = omp_get_thread_num ();
	/* note: no schedule clause here, unlike check_for_schedule_static */
#pragma omp for
	for (i = 0; i < CFSMAX_SIZE; i++)
	  {
	    tids[i] = tid;
	  }
      }				/* end of parallel */
      /*
         printf("debug---------------------\n");
         for (i=0;i<CFSMAX_SIZE -1;i++) printf("%d ",tids[i]);
         printf("End of debug---------------------\n");
       */
      for (i = 0; i < CFSMAX_SIZE; ++i)
	{
	  ii = (i / chunk_size) % threads;	/*round-robin for static chunk */
	  if (tids[i] != ii)
	    {
	      result++;
	      fprintf (logFile,
		       "Iteration %d should be assigned to %d instead of %d\n",
		       i, ii, tids[i]);
	    }
	}
      /*printf("Alles OK beim Test von schedule(static)\n"); */
      return (result == 0);
    }
}
mxEvaluateSourceTopography2d.c
#include "mex.h"
#include "mxSWE2d.h"

#define NRHS 4
#define NLHS 1
#define NVAR 3

/* Evaluate the 2-d shallow-water-equation bottom-topography source term
 * on wet cells:
 *   S_hu = -g * eta * dz/dx,   S_hv = -g * eta * dz/dy
 * where eta = h + z (free-surface elevation — assumes fphys.h is water
 * depth and fphys.z is bottom elevation; TODO confirm against mxSWE2d.h).
 *
 * prhs[0] : gravity constant (scalar)
 * prhs[1] : per-element region type (int8, K)
 * prhs[2] : physical field (Np x K x Nvar), converted via convertMexToPhysField
 * prhs[3] : bottom gradient [zx, zy] (Np x K x 2)
 * plhs[0] : source term (Np x K x NVAR), dry cells left zero-initialized
 */
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[]) {
  /* Argument-count checks.  BUG FIX: the original only mexPrintf'd a message
   * and fell through, dereferencing invalid prhs/plhs entries afterwards;
   * mexErrMsgIdAndTxt raises a MATLAB error and aborts the MEX call. */
  if (nrhs != NRHS) {
    mexErrMsgIdAndTxt("Matlab:mxEvaluateSourceTopography2d:InvalidNumberInput",
                      "%d inputs required.", NRHS);
  }
  if (nlhs != NLHS) {
    mexErrMsgIdAndTxt("Matlab:mxEvaluateSourceTopography2d:InvalidNumberOutput",
                      "%d outputs required.", NLHS);
  }

  double gra = mxGetScalar(prhs[0]);
  signed char* regType = (signed char*)mxGetData(prhs[1]);
  double* zgrad = mxGetPr(prhs[3]);

  PhysField fphys = convertMexToPhysField(prhs[2]);
  const size_t Np = fphys.Np;  /* nodes per element */
  const size_t K = fphys.K;    /* number of elements */
  const size_t Ntmp = Np * K;
  const size_t NdimOut = 3;
  const mwSize dimOut[3] = {Np, K, NVAR};
  /* mxCreateNumericArray zero-fills, so dry cells keep a zero source term */
  plhs[0] = mxCreateNumericArray(NdimOut, dimOut, mxDOUBLE_CLASS, mxREAL);

  double* bx = zgrad;        /* dz/dx, first Np*K slab of zgrad */
  double* by = zgrad + Ntmp; /* dz/dy, second slab */

  PhysField source = convertMexToPhysField(plhs[0]);
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
  for (int k = 0; k < K; k++) {
    NdgRegionType type = (NdgRegionType)regType[k];
    if (type == NdgRegionWet) {
      for (int n = 0; n < Np; n++) {
        int sk = k * Np + n;
        const double eta_ = fphys.h[sk] + fphys.z[sk];
        source.hu[sk] = -gra * eta_ * bx[sk];
        source.hv[sk] = -gra * eta_ * by[sk];
      }
    }
  }
  return;
}
meanr_nthreads.c
#ifdef _OPENMP #include <omp.h> #endif #include <RNACI.h> #define MIN(a,b) ((a)<(b)?(a):(b)) static inline int num_threads() { int n = 0; #ifdef _OPENMP int nth, tl; #pragma omp parallel { nth = omp_get_num_threads(); tl = omp_get_thread_limit(); } n = MIN(nth, tl); #else n = 1; #endif return n; } SEXP R_meanr_nthreads() { SEXP nth; newRvec(nth, 1, "int"); INT(nth) = num_threads(); unhideGC(); return nth; }
graphAdjArrayList.c
// ----------------------------------------------------------------------------- // // "00_AccelGraph" // // ----------------------------------------------------------------------------- // Copyright (c) 2014-2019 All rights reserved // ----------------------------------------------------------------------------- // Author : Abdullah Mughrabi // Email : atmughra@ncsu.edu||atmughrabi@gmail.com // File : graphAdjArrayList.c // Create : 2019-06-21 17:15:17 // Revise : 2019-09-28 15:36:13 // Editor : Abdullah Mughrabi // ----------------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdint.h> #include <omp.h> #include "timer.h" #include "myMalloc.h" #include "graphConfig.h" #include "edgeList.h" #include "sortRun.h" #include "vertex.h" #include "adjArrayList.h" #include "graphAdjArrayList.h" #include "reorder.h" //edgelist prerpcessing // #include "countsort.h" // #include "radixsort.h" void graphAdjArrayListPrintMessageWithtime(const char *msg, double time) { printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", msg); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", time); printf(" -----------------------------------------------------\n"); } // A utility function that creates a graphAdjArrayList of V vertices struct GraphAdjArrayList *graphAdjArrayListGraphNew(uint32_t V) { struct GraphAdjArrayList *graphAdjArrayList = (struct GraphAdjArrayList *) my_malloc( sizeof(struct GraphAdjArrayList)); graphAdjArrayList->num_vertices = V; graphAdjArrayList->vertices = (struct AdjArrayList *) my_malloc( V * sizeof(struct AdjArrayList)); uint32_t i; for(i = 0; i < V; i++) { graphAdjArrayList->vertices[i].outNodes = NULL; graphAdjArrayList->vertices[i].out_degree = 0; #if DIRECTED graphAdjArrayList->vertices[i].inNodes = NULL; graphAdjArrayList->vertices[i].in_degree = 0; #endif } return graphAdjArrayList; } struct 
GraphAdjArrayList *graphAdjArrayListEdgeListNew(struct EdgeList *edgeList) { struct Timer *timer = (struct Timer *) my_malloc( sizeof(struct Timer)); struct GraphAdjArrayList *graphAdjArrayList; Start(timer); graphAdjArrayList = graphAdjArrayListGraphNew(edgeList->num_vertices); graphAdjArrayList->num_edges = edgeList->num_edges; graphAdjArrayList->avg_degree = edgeList->num_edges / edgeList->num_vertices; #if WEIGHTED graphAdjArrayList->max_weight = edgeList->max_weight; #endif Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList New (Seconds)", Seconds(timer)); Start(timer); graphAdjArrayList = graphAdjArrayListEdgeListProcessOutDegree(graphAdjArrayList, edgeList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Process OutDegree (Seconds)", Seconds(timer)); Start(timer); graphAdjArrayList = graphAdjArrayListEdgeAllocateOutNodes(graphAdjArrayList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Allocate Memory (Seconds)", Seconds(timer)); Start(timer); graphAdjArrayList = graphAdjArrayListEdgePopulateOutNodes(graphAdjArrayList, edgeList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Populate OutNodes (Seconds)", Seconds(timer)); freeEdgeList(edgeList); free(timer); return graphAdjArrayList; } struct GraphAdjArrayList *graphAdjArrayListEdgeListNewWithInverse(struct EdgeList *edgeList, struct EdgeList *inverseEdgeList) { struct Timer *timer = (struct Timer *) my_malloc( sizeof(struct Timer)); struct GraphAdjArrayList *graphAdjArrayList; Start(timer); graphAdjArrayList = graphAdjArrayListGraphNew(edgeList->num_vertices); graphAdjArrayList->num_edges = edgeList->num_edges; graphAdjArrayList->avg_degree = edgeList->num_edges / edgeList->num_vertices; #if WEIGHTED graphAdjArrayList->max_weight = edgeList->max_weight; #endif Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList New (Seconds)", Seconds(timer)); Start(timer); graphAdjArrayList = 
graphAdjArrayListEdgeListProcessOutDegree(graphAdjArrayList, edgeList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Process OutDegree (Seconds)", Seconds(timer)); Start(timer); graphAdjArrayList = graphAdjArrayListEdgeAllocateOutNodes(graphAdjArrayList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Allocate Memory (Seconds)", Seconds(timer)); Start(timer); graphAdjArrayList = graphAdjArrayListEdgePopulateOutNodes(graphAdjArrayList, edgeList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Populate OutNodes (Seconds)", Seconds(timer)); freeEdgeList(edgeList); #if DIRECTED Start(timer); graphAdjArrayList = graphAdjArrayListEdgeListProcessInDegree(graphAdjArrayList, inverseEdgeList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Process InDegree (Seconds)", Seconds(timer)); Start(timer); graphAdjArrayList = graphAdjArrayListEdgeAllocateInodes(graphAdjArrayList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Allocate Memory (Seconds)", Seconds(timer)); Start(timer); graphAdjArrayList = graphAdjArrayListEdgePopulateInNodes(graphAdjArrayList, inverseEdgeList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Graph AdjArrayList Populate InNodes (Seconds)", Seconds(timer)); freeEdgeList(inverseEdgeList); #endif free(timer); return graphAdjArrayList; } struct GraphAdjArrayList *graphAdjArrayListEdgeListProcessInOutDegree(struct GraphAdjArrayList *graphAdjArrayList, struct EdgeList *edgeList) { uint32_t i; uint32_t src; #if DIRECTED uint32_t dest; #endif #pragma omp parallel for for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; #pragma omp atomic update graphAdjArrayList->vertices[src].out_degree++; #if DIRECTED dest = edgeList->edges_array_dest[i]; #pragma omp atomic update graphAdjArrayList->vertices[dest].in_degree++; #endif } return graphAdjArrayList; } struct GraphAdjArrayList 
*graphAdjArrayListEdgeListProcessOutDegree(struct GraphAdjArrayList *graphAdjArrayList, struct EdgeList *edgeList) { uint32_t i; uint32_t src; #pragma omp parallel for for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; #pragma omp atomic update graphAdjArrayList->vertices[src].out_degree++; } return graphAdjArrayList; } #if DIRECTED struct GraphAdjArrayList *graphAdjArrayListEdgeListProcessInDegree(struct GraphAdjArrayList *graphAdjArrayList, struct EdgeList *inverseEdgeList) { uint32_t i; uint32_t dest; #pragma omp parallel for for(i = 0; i < inverseEdgeList->num_edges; i++) { dest = inverseEdgeList->edges_array_src[i]; #pragma omp atomic update graphAdjArrayList->vertices[dest].in_degree++; } return graphAdjArrayList; } #endif struct GraphAdjArrayList *graphAdjArrayListEdgeAllocate(struct GraphAdjArrayList *graphAdjArrayList) { uint32_t v; #pragma omp parallel for for(v = 0; v < graphAdjArrayList->num_vertices; v++) { adjArrayListCreateNeighbourList(&(graphAdjArrayList->vertices[v])); #if DIRECTED graphAdjArrayList->vertices[v].in_degree = 0; #endif graphAdjArrayList->vertices[v].out_degree = 0; // will be used as an index to edge array outnode } return graphAdjArrayList; } struct GraphAdjArrayList *graphAdjArrayListEdgeAllocateInodes(struct GraphAdjArrayList *graphAdjArrayList) { #if DIRECTED uint32_t v; // #pragma omp parallel for for(v = 0; v < graphAdjArrayList->num_vertices; v++) { adjArrayListCreateNeighbourListInNodes(&(graphAdjArrayList->vertices[v])); graphAdjArrayList->vertices[v].in_degree = 0; } #endif return graphAdjArrayList; } struct GraphAdjArrayList *graphAdjArrayListEdgeAllocateOutNodes(struct GraphAdjArrayList *graphAdjArrayList) { uint32_t v; // #pragma omp parallel for for(v = 0; v < graphAdjArrayList->num_vertices; v++) { adjArrayListCreateNeighbourListOutNodes(&(graphAdjArrayList->vertices[v])); graphAdjArrayList->vertices[v].out_degree = 0; // will be used as an index to edge array outnode } return 
graphAdjArrayList; } struct GraphAdjArrayList *graphAdjArrayListEdgePopulate(struct GraphAdjArrayList *graphAdjArrayList, struct EdgeList *edgeList) { uint32_t i; uint32_t src; #if DIRECTED uint32_t dest; uint32_t in_degree; #endif uint32_t out_degree; // #pragma omp parallel for for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; // out_degree = __sync_fetch_and_add(&(graphAdjArrayList->vertices[src].out_degree), 1);y out_degree = graphAdjArrayList->vertices[src].out_degree; graphAdjArrayList->vertices[src].outNodes->edges_array_src[out_degree] = edgeList->edges_array_src[i]; graphAdjArrayList->vertices[src].outNodes->edges_array_dest[out_degree] = edgeList->edges_array_dest[i]; #if WEIGHTED graphAdjArrayList->vertices[src].outNodes->edges_array_weight[out_degree] = edgeList->edges_array_weight[i]; #endif graphAdjArrayList->vertices[src].out_degree++; #if DIRECTED dest = edgeList->edges_array_dest[i]; in_degree = __sync_fetch_and_add(&(graphAdjArrayList->vertices[src].in_degree), 1); graphAdjArrayList->vertices[dest].inNodes->edges_array_src[in_degree] = edgeList->edges_array_src[i]; graphAdjArrayList->vertices[dest].inNodes->edges_array_dest[in_degree] = edgeList->edges_array_dest[i]; #if WEIGHTED graphAdjArrayList->vertices[dest].inNodes->edges_array_weight[in_degree] = edgeList->edges_array_weight[i]; #endif #endif } return graphAdjArrayList; } struct GraphAdjArrayList *graphAdjArrayListEdgePopulateOutNodes(struct GraphAdjArrayList *graphAdjArrayList, struct EdgeList *edgeList) { uint32_t i; uint32_t src; uint32_t out_degree; for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; out_degree = graphAdjArrayList->vertices[src].out_degree; graphAdjArrayList->vertices[src].outNodes->edges_array_src[out_degree] = edgeList->edges_array_src[i]; graphAdjArrayList->vertices[src].outNodes->edges_array_dest[out_degree] = edgeList->edges_array_dest[i]; #if WEIGHTED 
graphAdjArrayList->vertices[src].outNodes->edges_array_weight[out_degree] = edgeList->edges_array_weight[i]; #endif graphAdjArrayList->vertices[src].out_degree++; } return graphAdjArrayList; } struct GraphAdjArrayList *graphAdjArrayListEdgePopulateInNodes(struct GraphAdjArrayList *graphAdjArrayList, struct EdgeList *inverseEdgeList) { #if DIRECTED uint32_t i; uint32_t dest; uint32_t in_degree; for(i = 0; i < inverseEdgeList->num_edges; i++) { dest = inverseEdgeList->edges_array_src[i]; in_degree = graphAdjArrayList->vertices[dest].in_degree; graphAdjArrayList->vertices[dest].inNodes->edges_array_src[in_degree] = inverseEdgeList->edges_array_src[i]; graphAdjArrayList->vertices[dest].inNodes->edges_array_dest[in_degree] = inverseEdgeList->edges_array_dest[i]; #if WEIGHTED graphAdjArrayList->vertices[dest].inNodes->edges_array_weight[in_degree] = inverseEdgeList->edges_array_weight[i]; #endif graphAdjArrayList->vertices[dest].in_degree++; } #endif return graphAdjArrayList; } // // A utility function to print the adjacency list // // representation of graphAdjArrayList void graphAdjArrayListPrint(struct GraphAdjArrayList *graphAdjArrayList) { printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "GraphAdjArrayList Properties"); printf(" -----------------------------------------------------\n"); #if WEIGHTED printf("| %-51s | \n", "WEIGHTED"); #else printf("| %-51s | \n", "UN-WEIGHTED"); #endif #if DIRECTED printf("| %-51s | \n", "DIRECTED"); #else printf("| %-51s | \n", "UN-DIRECTED"); #endif printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Average Degree (D)"); printf("| %-51u | \n", graphAdjArrayList->avg_degree); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Number of Vertices (V)"); printf("| %-51u | \n", graphAdjArrayList->num_vertices); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Number of 
Edges (E)"); printf("| %-51u | \n", graphAdjArrayList->num_edges); printf(" -----------------------------------------------------\n"); // struct AdjArrayList* pCrawl; // uint32_t v; // for (v = 0; v < graphAdjArrayList->num_vertices; v++){ // pCrawl = &(graphAdjArrayList->vertices[v]); // if(pCrawl){ // printf("\n Node : %d \n", v); // adjArrayListPrint(pCrawl); // } // } } void graphAdjArrayListFree(struct GraphAdjArrayList *graphAdjArrayList) { uint32_t v; struct AdjArrayList *pCrawl; for (v = 0; v < graphAdjArrayList->num_vertices; ++v) { pCrawl = &(graphAdjArrayList->vertices[v]); if(pCrawl->outNodes) freeEdgeList(pCrawl->outNodes); #if DIRECTED if(pCrawl->inNodes) freeEdgeList(pCrawl->inNodes); #endif } if(graphAdjArrayList->vertices) free(graphAdjArrayList->vertices); if(graphAdjArrayList) free(graphAdjArrayList); } struct GraphAdjArrayList *graphAdjArrayListPreProcessingStep (struct Arguments *arguments) { struct Timer *timer = (struct Timer *) my_malloc(sizeof(struct Timer)); Start(timer); struct EdgeList *edgeList = readEdgeListsbin(arguments->fnameb, 0, arguments->symmetric, arguments->weighted); Stop(timer); // edgeListPrint(edgeList); graphAdjArrayListPrintMessageWithtime("Read Edge List From File (Seconds)", Seconds(timer)); // Start(timer); edgeList = sortRunAlgorithms(edgeList, arguments->sort); // Stop(timer); // graphAdjArrayListPrintMessageWithtime("Radix Sort Edges By Source (Seconds)",Seconds(timer)); if(arguments->dflag) { Start(timer); edgeList = removeDulpicatesSelfLoopEdges(edgeList); Stop(timer); graphCSRPrintMessageWithtime("Removing duplicate edges (Seconds)", Seconds(timer)); } if(arguments->lmode) { edgeList = reorderGraphProcess(edgeList, arguments); edgeList = sortRunAlgorithms(edgeList, arguments->sort); } // add another layer 2 of reordering to test how DBG affect Gorder, or Gorder affect Rabbit order ...etc arguments->lmode = arguments->lmode_l2; if(arguments->lmode) { edgeList = reorderGraphProcess(edgeList, arguments); edgeList = 
sortRunAlgorithms(edgeList, arguments->sort); } arguments->lmode = arguments->lmode_l3; if(arguments->lmode) { edgeList = reorderGraphProcess(edgeList, arguments); edgeList = sortRunAlgorithms(edgeList, arguments->sort); } if(arguments->mmode) edgeList = maskGraphProcess(edgeList, arguments); // if(arguments->mmode) // edgeList = maskGraphProcess(edgeList, arguments); #if DIRECTED Start(timer); // struct EdgeList* inverse_edgeList = readEdgeListsbin(fnameb,1); struct EdgeList *inverse_edgeList = readEdgeListsMem(edgeList, 1, 0, 0); Stop(timer); // edgeListPrint(inverse_edgeList); graphAdjArrayListPrintMessageWithtime("Read Inverse Edge List From File (Seconds)", Seconds(timer)); // Start(timer); inverse_edgeList = sortRunAlgorithms(inverse_edgeList, arguments->sort); // Stop(timer); // graphAdjArrayListPrintMessageWithtime("Radix Sort Inverse Edges By Source (Seconds)",Seconds(timer)); #endif #if DIRECTED Start(timer); struct GraphAdjArrayList *graph = graphAdjArrayListEdgeListNewWithInverse(edgeList, inverse_edgeList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Create AdjArrayList from EdgeList (Seconds)", Seconds(timer)); #else Start(timer); struct GraphAdjArrayList *graph = graphAdjArrayListEdgeListNew(edgeList); Stop(timer); graphAdjArrayListPrintMessageWithtime("Create AdjArrayList from EdgeList (Seconds)", Seconds(timer)); #endif graphAdjArrayListPrint(graph); // freeEdgeList(edgeList); // #if DIRECTED // freeEdgeList(inverse_edgeList); // #endif free(timer); return graph; }
Scalar3DUpdater5.h
/*
 * BCMTools
 *
 * Copyright (C) 2011-2013 Institute of Industrial Science, The University of Tokyo.
 * All rights reserved.
 *
 * Copyright (c) 2012-2013 Advanced Institute for Computational Science, RIKEN.
 * All rights reserved.
 *
 */

///
/// @file Scalar3DUpdater5.h
/// @brief Virtual-cell (ghost-cell) updater for scalar data classes.
///

#ifndef SCALAR_3D_UPDATER5_H
#define SCALAR_3D_UPDATER5_H

#include "BCMTools.h"
#include "VCUpdater.h"
#include "Scalar3D.h"

#ifdef BCMT_NAMESPACE
namespace BCMT_NAMESPACE {
#endif

/// Virtual-cell updater for scalar data classes.
///
/// @note For simplicity, both the L→L+1 and the L+1→L exchange
///       interpolate on the sending side before communicating.
///
/// @todo implement the interpolation kernels in Fortran
///
template <typename T>
class Scalar3DUpdater5 : public VCUpdater {

private:

  Scalar3D<T>* dataClass;  ///< data class whose virtual cells are synchronized

  T* sendBuffer[NUM_FACE][NUM_SUBFACE];  ///< send-buffer table
  T* recvBuffer[NUM_FACE][NUM_SUBFACE];  ///< receive-buffer table

  Scalar3D<T>* neighborDataClass[NUM_FACE][NUM_SUBFACE];  ///< in-process neighbor data-class table

  int nx, ny, nz, vc;  ///< cell counts per axis and virtual-cell width (cached from dataClass)

public:

  /// Constructor.
  ///
  ///  @param[in] neighborInfo neighbor-information array
  ///  @param[in] comm MPI communicator (default: MPI::COMM_WORLD)
  ///
  Scalar3DUpdater5(const NeighborInfo* neighborInfo,
                   const MPI::Comm& comm = MPI::COMM_WORLD)
    : VCUpdater(neighborInfo, comm) {
    clearCommBufferPointer();
    clearNeighbor();
  }

  /// Destructor (buffers and data classes are owned elsewhere).
  ~Scalar3DUpdater5() {}

  /// Register the data class to synchronize and cache its geometry.
  void setDataClass(DataClass* dc) {
    dataClass = dynamic_cast<Scalar3D<T>*>(dc);
    nx = dataClass->getSizeX();
    ny = dataClass->getSizeY();
    nz = dataClass->getSizeZ();
    vc = dataClass->getVCSize();
  }

  /// Byte size of the send buffer (same-level exchange).
  size_t getSendBufferByteSize(Face face) const {
    return sizeof(T) * getCommBufferSize(face);
  }

  /// Byte size of the send buffer (level L+1 → L); a subface covers 1/4 of a face.
  size_t getSendBufferByteSizeF2C(Face face, Subface subface) const {
    return sizeof(T) * getCommBufferSize(face) / 4;
  }

  /// Byte size of the send buffer (level L → L+1).
  size_t getSendBufferByteSizeC2F(Face face, Subface subface) const {
    return sizeof(T) * getCommBufferSize(face);
  }

  /// Byte size of the receive buffer (same-level exchange).
  size_t getRecvBufferByteSize(Face face) const {
    return sizeof(T) * getCommBufferSize(face);
  }

  /// Byte size of the receive buffer (level L+1 → L).
  size_t getRecvBufferByteSizeF2C(Face face, Subface subface) const {
    return sizeof(T) * getCommBufferSize(face) / 4;
  }

  /// Byte size of the receive buffer (level L → L+1).
  size_t getRecvBufferByteSizeC2F(Face face, Subface subface) const {
    return sizeof(T) * getCommBufferSize(face);
  }

  /// PointerSetter for the send-buffer table entry (caller owns the new object).
  PointerSetterBase* getSendBufferPointerSetter(Face face, Subface subface) {
    return new PointerSetter<T>(&sendBuffer[face][subface]);
  }

  /// PointerSetter for the receive-buffer table entry (caller owns the new object).
  PointerSetterBase* getRecvBufferPointerSetter(Face face, Subface subface) {
    return new PointerSetter<T>(&recvBuffer[face][subface]);
  }

public:

  /// Register a neighbor data class that lives in the same process.
  void setNeighbor(Face face, Subface subface, DataClass* dataClass) {
    neighborDataClass[face][subface] = dynamic_cast<Scalar3D<T>*>(dataClass);
  }

  /// Unregister one neighbor data-class entry.
  void clearNeighbor(Face face, Subface subface) {
    neighborDataClass[face][subface] = 0;
  }

  /// Unregister all neighbor data-class entries.
  void clearNeighbor() {
    for (int i = 0; i < NUM_FACE; ++i) {
      for (int j = 0; j < NUM_SUBFACE; ++j) {
        clearNeighbor(Face(i), Subface(j));
      }
    }
  }

  /// Clear one entry of the communication-buffer tables.
  void clearCommBufferPointer(Face face, Subface subface) {
    sendBuffer[face][subface] = recvBuffer[face][subface] = 0;
  }

  /// Clear all entries of the communication-buffer tables.
  void clearCommBufferPointer() {
    for (int i = 0; i < NUM_FACE; ++i) {
      for (int j = 0; j < NUM_SUBFACE; ++j) {
        clearCommBufferPointer(Face(i), Subface(j));
      }
    }
  }

private:

  /// Number of elements exchanged across a whole face (face area × vc layers).
  size_t getCommBufferSize(Face face) const {
    switch (face) {
      case X_M:
      case X_P:
        return ny * nz * vc;
      case Y_M:
      case Y_P:
        return nz * nx * vc;
      case Z_M:
      case Z_P:
        return nx * ny * vc;
      default:
        Exit(EX_FAILURE);
    }
    /* NOTREACHED */
  }

  /// L+1 → L interpolation (fine f(i,j,k) → coarse c(I,J,K)):
  /// average of the 2×2×2 fine cells covering one coarse cell.
  T interpolateF2C(const Scalar3D<T>& f, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    return 0.125 * (f(i, j, k) + f(i + 1, j, k)
                  + f(i, j + 1, k) + f(i + 1, j + 1, k)
                  + f(i, j, k + 1) + f(i + 1, j, k + 1)
                  + f(i, j + 1, k + 1) + f(i + 1, j + 1, k + 1));
  }

  /// L+1 → L interpolation (fine f(i,j,k) → coarse c(I,J,K)), raw-array variant.
  T interpolateF2C(const T* fData, const Index3DS& fIndex, int I, int J, int K) {
    int i = 2 * I;
    int j = 2 * J;
    int k = 2 * K;
    return 0.125 * (fData[fIndex(i, j, k)] + fData[fIndex(i + 1, j, k)]
                  + fData[fIndex(i, j + 1, k)] + fData[fIndex(i + 1, j + 1, k)]
                  + fData[fIndex(i, j, k + 1)] + fData[fIndex(i + 1, j, k + 1)]
                  + fData[fIndex(i, j + 1, k + 1)] + fData[fIndex(i + 1, j + 1, k + 1)]);
  }

  /// L → L+1 trilinear interpolation (coarse c(I,J,K) → fine f(i,j,k)).
  T interpolateC2F(const Scalar3D<T>& c, int i, int j, int k) {
    int I, J, K;
    double r, s, t;
    linearInterpolate(i, nx, I, r);
    linearInterpolate(j, ny, J, s);
    linearInterpolate(k, nz, K, t);
    return (1.0 - t) * ((1.0 - s) * ((1.0 - r) * c(I, J, K) + r * c(I + 1, J, K))
                      + s * ((1.0 - r) * c(I, J + 1, K) + r * c(I + 1, J + 1, K)))
         + t * ((1.0 - s) * ((1.0 - r) * c(I, J, K + 1) + r * c(I + 1, J, K + 1))
              + s * ((1.0 - r) * c(I, J + 1, K + 1) + r * c(I + 1, J + 1, K + 1)));
  }

  /// L → L+1 trilinear interpolation (coarse c(I,J,K) → fine f(i,j,k)),
  /// raw-array variant.
  T interpolateC2F(const T* cData, const Index3DS& cIndex, int i, int j, int k) {
    int I, J, K;
    double r, s, t;
    linearInterpolate(i, nx, I, r);
    linearInterpolate(j, ny, J, s);
    linearInterpolate(k, nz, K, t);
    return (1.0 - t) * ((1.0 - s) * ((1.0 - r) * cData[cIndex(I, J, K)] + r * cData[cIndex(I + 1, J, K)])
                      + s * ((1.0 - r) * cData[cIndex(I, J + 1, K)] + r * cData[cIndex(I + 1, J + 1, K)]))
         + t * ((1.0 - s) * ((1.0 - r) * cData[cIndex(I, J, K + 1)] + r * cData[cIndex(I + 1, J, K + 1)])
              + s * ((1.0 - r) * cData[cIndex(I, J + 1, K + 1)] + r * cData[cIndex(I + 1, J + 1, K + 1)]));
  }

  //FEAST.s gradient: half the difference between c(I,J,K) and its shifted
  // neighbor, used for gradient-based C2F extrapolation
  T CalcGradient(const Scalar3D<T>& c, int I, int J, int K,
                 int i_shift, int j_shift, int k_shift) {
    T dp = (c(I, J, K) - c(I + i_shift, J + j_shift, K + k_shift)) * 0.5;
    return dp;
  }

  //FEAST.s gradient, raw-array variant
  T CalcGradient(const T* cData, const Index3DS& cIndex, int I, int J, int K,
                 int i_shift, int j_shift, int k_shift) {
    T dp = (cData[cIndex(I, J, K)]
          - cData[cIndex(I + i_shift, J + j_shift, K + k_shift)]) * 0.5;
    return dp;
  }

  /// Gradient-based extrapolation: data plus one gradient step.
  T Gradient(const T data, const T dp) {
    return data + dp;
  }

  /// Compute interpolation parameters for C2F interpolation.
  ///
  /// @note at the end points this extrapolates instead of interpolating
  ///
  void linearInterpolate(int i, int n, int& I, double& r) {
#if 1
    // closed form of the branchy version below
    I = std::min(std::max(i / 2 - 1 + i % 2, 0), n - 2);
    r = -0.25 + 0.5 * i - double(I);
#else
    if (i == 0) {
      // extrapolation
      I = 0;
      r = -0.25;
    }
    else if (i == 2 * n - 1) {
      // extrapolation
      I = n - 2;
      r = 1.25;
    }
    else if (i % 2 == 0) {
      I = i / 2 - 1;
      r = 0.75;
    }
    else {
      I = i / 2;
      r = 0.25;
    }
#endif
  }

  /// Copy virtual-cell data from an in-process neighbor (same level).
  void copyFromNeighbor(Face face) {
    Scalar3D<T>* dc = neighborDataClass[face][0];
    if (!dc) return;
    switch (face) {
      case X_M:
        dataClass->copyFromDataClass(-vc, 0, 0, dc->getSizeX() - vc, 0, 0, vc, ny, nz, dc);
        break;
      case X_P:
        dataClass->copyFromDataClass(nx, 0, 0, 0, 0, 0, vc, ny, nz, dc);
        break;
      case Y_M:
        dataClass->copyFromDataClass(0, -vc, 0, 0, dc->getSizeY() - vc, 0, nx, vc, nz, dc);
        break;
      case Y_P:
        dataClass->copyFromDataClass(0, ny, 0, 0, 0, 0, nx, vc, nz, dc);
        break;
      case Z_M:
        dataClass->copyFromDataClass(0, 0, -vc, 0, 0, dc->getSizeZ() - vc, nx, ny, vc, dc);
        break;
      case Z_P:
        dataClass->copyFromDataClass(0, 0, nz, 0, 0, 0, nx, ny, vc, dc);
        break;
      default:
        break;
    }
  }

  /// Copy virtual-cell data from an in-process neighbor (level L+1 → L).
  void copyFromNeighborF2C(Face face, Subface subface) {
    T* cData = dataClass->getData();
    Index3DS cIndex = dataClass->getIndex();
    Scalar3D<T>* f = neighborDataClass[face][subface];
    T* fData = f->getData();
    Index3DS fIndex = f->getIndex();
    copyFromNeighborF2C_0(nx, ny, nz, vc, face, subface, fData, fIndex, cData, cIndex);
  }

  /// Copy virtual-cell data from an in-process neighbor (level L → L+1).
  void copyFromNeighborC2F(Face face, Subface subface) {
    T* fData = dataClass->getData();
    Index3DS fIndex = dataClass->getIndex();
    Scalar3D<T>* c = neighborDataClass[face][0];
    T* cData = c->getData();
    Index3DS cIndex = c->getIndex();
    copyFromNeighborC2F_0(nx, ny, nz, vc, face, subface, cData, cIndex, fData, fIndex);
  }

  /// Copy virtual-cell data into the send buffer (same level).
  void copyToCommBuffer(Face face) {
    T* buffer = sendBuffer[face][0];
    if (!buffer) return;
    switch (face) {
      case X_M:
        dataClass->copyToBuffer(0, 0, 0, vc, ny, nz, buffer);
        break;
      case X_P:
        dataClass->copyToBuffer(nx - vc, 0, 0, vc, ny, nz, buffer);
        break;
      case Y_M:
        dataClass->copyToBuffer(0, 0, 0, nx, vc, nz, buffer);
        break;
      case Y_P:
        dataClass->copyToBuffer(0, ny - vc, 0, nx, vc, nz, buffer);
        break;
      case Z_M:
        dataClass->copyToBuffer(0, 0, 0, nx, ny, vc, buffer);
        break;
      case Z_P:
        dataClass->copyToBuffer(0, 0, nz - vc, nx, ny, vc, buffer);
        break;
      default:
        break;
    }
  }

  /// Copy virtual-cell data into the send buffer (level L+1 → L).
  /// NOTE(review): reads sendBuffer[face][0], not [face][subface] —
  /// presumably F2C sends use only subface slot 0; confirm against VCUpdater.
  void copyToCommBufferF2C(Face face, Subface subface) {
    T* buffer = sendBuffer[face][0];
    T* fData = dataClass->getData();
    Index3DS fIndex = dataClass->getIndex();
    copyToCommBufferF2C_0(nx, ny, nz, vc, face, subface, fData, fIndex, buffer);
  }

  /// Copy virtual-cell data into the send buffer (level L → L+1).
  void copyToCommBufferC2F(Face face, Subface subface) {
    T* cData = dataClass->getData();
    Index3DS cIndex = dataClass->getIndex();
    T* buffer = sendBuffer[face][subface];
    copyToCommBufferC2F_0(nx, ny, nz, vc, face, subface, cData, cIndex, buffer);
  }

  /// Copy virtual-cell data from the receive buffer (same level).
  void copyFromCommBuffer(Face face) {
    T* buffer = recvBuffer[face][0];
    if (!buffer) return;
    switch (face) {
      case X_M:
        dataClass->copyFromBuffer(-vc, 0, 0, vc, ny, nz, buffer);
        break;
      case X_P:
        dataClass->copyFromBuffer(nx, 0, 0, vc, ny, nz, buffer);
        break;
      case Y_M:
        dataClass->copyFromBuffer(0, -vc, 0, nx, vc, nz, buffer);
        break;
      case Y_P:
        dataClass->copyFromBuffer(0, ny, 0, nx, vc, nz, buffer);
        break;
      case Z_M:
        dataClass->copyFromBuffer(0, 0, -vc, nx, ny, vc, buffer);
        break;
      case Z_P:
        dataClass->copyFromBuffer(0, 0, nz, nx, ny, vc, buffer);
        break;
      default:
        break;
    }
  }

  /// Copy virtual-cell data from the receive buffer (level L+1 → L).
  /// Each subface fills one quadrant of the face.
  void copyFromCommBufferF2C(Face face, Subface subface) {
    T* buffer = recvBuffer[face][subface];
    switch (face) {
      case X_M: {
        int j0 = (ny / 2) * subfaceOrigin0(subface);
        int k0 = (nz / 2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(-vc, j0, k0, vc, ny / 2, nz / 2, buffer);
        break;
      }
      case X_P: {
        int j0 = (ny / 2) * subfaceOrigin0(subface);
        int k0 = (nz / 2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(nx, j0, k0, vc, ny / 2, nz / 2, buffer);
        break;
      }
      case Y_M: {
        int k0 = (nz / 2) * subfaceOrigin0(subface);
        int i0 = (nx / 2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(i0, -vc, k0, nx / 2, vc, nz / 2, buffer);
        break;
      }
      case Y_P: {
        int k0 = (nz / 2) * subfaceOrigin0(subface);
        int i0 = (nx / 2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(i0, ny, k0, nx / 2, vc, nz / 2, buffer);
        break;
      }
      case Z_M: {
        int i0 = (nx / 2) * subfaceOrigin0(subface);
        int j0 = (ny / 2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(i0, j0, -vc, nx / 2, ny / 2, vc, buffer);
        break;
      }
      case Z_P: {
        int i0 = (nx / 2) * subfaceOrigin0(subface);
        int j0 = (ny / 2) * subfaceOrigin1(subface);
        dataClass->copyFromBuffer(i0, j0, nz, nx / 2, ny / 2, vc, buffer);
        break;
      }
      default:
        break;
    }
  }

  /// Copy virtual-cell data from the receive buffer (level L → L+1).
  void copyFromCommBufferC2F(Face face, Subface subface) {
    //FEAST.s
    //copyFromCommBuffer(face);
    copyFromCommBufferC2F_0(face);
    //FEAST.e
  }

  /// FEAST variant: reconstruct fine ghost cells by adding the received
  /// gradient increment to the adjacent interior value, layer by layer.
  void copyFromCommBufferC2F_0(Face face) {
    //FEAST.s
    T* fData = dataClass->getData();
    Index3DS fIndex = dataClass->getIndex();
    T* buffer = recvBuffer[face][0];
    if (!buffer) return;
    switch (face) {
      case X_M:
#pragma omp parallel for if (nz >= 16)
        for (int k = 0; k < nz; ++k) {
          for (int j = 0; j < ny; ++j) {
            for (int i = 0; i < vc; ++i) {
              fData[fIndex(-1 - i, j, k)] =
                fData[fIndex(0 - i, j, k)] + buffer[i + vc * j + (vc * ny) * k];
            }
          }
        }
        break;
      case X_P:
#pragma omp parallel for if (nz >= 16)
        for (int k = 0; k < nz; ++k) {
          for (int j = 0; j < ny; ++j) {
            for (int i = nx; i < nx + vc; ++i) {
              fData[fIndex(i, j, k)] =
                fData[fIndex(i - 1, j, k)] + buffer[i - nx + vc * j + (vc * ny) * k];
            }
          }
        }
        break;
      case Y_M:
#pragma omp parallel for if (nz >= 16)
        for (int k = 0; k < nz; ++k) {
          for (int j = 0; j < vc; ++j) {
            for (int i = 0; i < nx; ++i) {
              fData[fIndex(i, -1 - j, k)] =
                fData[fIndex(i, 0 - j, k)] + buffer[i + nx * j + (nx * vc) * k];
            }
          }
        }
        break;
      case Y_P:
#pragma omp parallel for if (nz >= 16)
        for (int k = 0; k < nz; ++k) {
          for (int j = ny; j < ny + vc; ++j) {
            for (int i = 0; i < nx; ++i) {
              fData[fIndex(i, j, k)] =
                fData[fIndex(i, j - 1, k)] + buffer[i + nx * (j - ny) + (nx * vc) * k];
            }
          }
        }
        break;
      case Z_M:
        // NOTE(review): the `if (nz >= 16)` clause also guards the Z cases,
        // whose outer loop only runs vc times — presumably copied from the
        // X/Y cases; confirm intended.
#pragma omp parallel for if (nz >= 16)
        for (int k = 0; k < vc; ++k) {
          for (int j = 0; j < ny; ++j) {
            for (int i = 0; i < nx; ++i) {
              fData[fIndex(i, j, -1 - k)] =
                fData[fIndex(i, j, 0 - k)] + buffer[i + nx * j + (nx * ny) * k];
            }
          }
        }
        break;
      case Z_P:
#pragma omp parallel for if (nz >= 16)
        for (int k = nz; k < nz + vc; ++k) {
          for (int j = 0; j < ny; ++j) {
            for (int i = 0; i < nx; ++i) {
              fData[fIndex(i, j, k)] =
                fData[fIndex(i, j, k - 1)] + buffer[i + nx * j + (nx * ny) * (k - nz)];
            }
          }
        }
        break;
      default:
        break;
    }
    //FEAST.e
  }

  /// In-process L+1 → L copy kernel: average fine cells into the coarse
  /// ghost layer of the subface quadrant.
  void copyFromNeighborF2C_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* fData, Index3DS fIndex,
                             T* cData, Index3DS cIndex) {
    switch (face) {
      case X_M: {
        int j0 = (ny / 2) * subfaceOrigin0(subface);
        int k0 = (nz / 2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz / 2; k++) {
          for (int j = 0; j < ny / 2; j++) {
            for (int i = 0; i < vc; i++) {
              cData[cIndex(i - vc, j + j0, k + k0)] =
                interpolateF2C(fData, fIndex, i + nx / 2 - vc, j, k);
            }
          }
        }
        break;
      }
      case X_P: {
        int j0 = (ny / 2) * subfaceOrigin0(subface);
        int k0 = (nz / 2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz / 2; k++) {
          for (int j = 0; j < ny / 2; j++) {
            for (int i = 0; i < vc; i++) {
              cData[cIndex(i + nx, j + j0, k + k0)] =
                interpolateF2C(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case Y_M: {
        int k0 = (nz / 2) * subfaceOrigin0(subface);
        int i0 = (nx / 2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz / 2; k++) {
          for (int j = 0; j < vc; j++) {
            for (int i = 0; i < nx / 2; i++) {
              cData[cIndex(i + i0, j - vc, k + k0)] =
                interpolateF2C(fData, fIndex, i, j + ny / 2 - vc, k);
            }
          }
        }
        break;
      }
      case Y_P: {
        int k0 = (nz / 2) * subfaceOrigin0(subface);
        int i0 = (nx / 2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz / 2; k++) {
          for (int j = 0; j < vc; j++) {
            for (int i = 0; i < nx / 2; i++) {
              cData[cIndex(i + i0, j + ny, k + k0)] =
                interpolateF2C(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case Z_M: {
        int i0 = (nx / 2) * subfaceOrigin0(subface);
        int j0 = (ny / 2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < vc; k++) {
          for (int j = 0; j < ny / 2; j++) {
            for (int i = 0; i < nx / 2; i++) {
              cData[cIndex(i + i0, j + j0, k - vc)] =
                interpolateF2C(fData, fIndex, i, j, k + nz / 2 - vc);
            }
          }
        }
        break;
      }
      case Z_P: {
        int i0 = (nx / 2) * subfaceOrigin0(subface);
        int j0 = (ny / 2) * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int k = 0; k < vc; k++) {
          for (int j = 0; j < ny / 2; j++) {
            for (int i = 0; i < nx / 2; i++) {
              cData[cIndex(i + i0, j + j0, k + nz)] =
                interpolateF2C(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      default:
        break;
    }
  }

  /// In-process L → L+1 copy kernel: fill fine ghost layers by extrapolating
  /// from the adjacent interior value with a coarse-cell gradient step.
  void copyFromNeighborC2F_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* cData, Index3DS cIndex,
                             T* fData, Index3DS fIndex) {
    switch (face) {
      case X_M: {
        int J0 = ny * subfaceOrigin0(subface);
        int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < vc; I++) {
              T dp = CalcGradient(cData, cIndex, nx - 1, J / 2 + J0 / 2, K / 2 + K0 / 2, 1, 0, 0);
              fData[fIndex(-1 - I, J, K)] = Gradient(fData[fIndex(0 - I, J, K)], dp);
            }
          }
        }
        break;
      }
      case X_P: {
        int J0 = ny * subfaceOrigin0(subface);
        int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < vc; I++) {
              T dp = CalcGradient(cData, cIndex, 0, J / 2 + J0 / 2, K / 2 + K0 / 2, -1, 0, 0);
              fData[fIndex(nx + I, J, K)] = Gradient(fData[fIndex(nx - 1 + I, J, K)], dp);
            }
          }
        }
        break;
      }
      case Y_M: {
        int K0 = nz * subfaceOrigin0(subface);
        int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < vc; J++) {
            for (int I = 0; I < nx; I++) {
              T dp = CalcGradient(cData, cIndex, I / 2 + I0 / 2, ny - 1, K / 2 + K0 / 2, 0, 1, 0);
              fData[fIndex(I, -1 - J, K)] = Gradient(fData[fIndex(I, 0 - J, K)], dp);
            }
          }
        }
        break;
      }
      case Y_P: {
        int K0 = nz * subfaceOrigin0(subface);
        int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < vc; J++) {
            for (int I = 0; I < nx; I++) {
              T dp = CalcGradient(cData, cIndex, I / 2 + I0 / 2, 0, K / 2 + K0 / 2, 0, -1, 0);
              fData[fIndex(I, ny + J, K)] = Gradient(fData[fIndex(I, ny - 1 + J, K)], dp);
            }
          }
        }
        break;
      }
      case Z_M: {
        int I0 = nx * subfaceOrigin0(subface);
        int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < vc; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < nx; I++) {
              T dp = CalcGradient(cData, cIndex, I / 2 + I0 / 2, J / 2 + J0 / 2, nz - 1, 0, 0, 1);
              fData[fIndex(I, J, -1 - K)] = Gradient(fData[fIndex(I, J, 0 - K)], dp);
            }
          }
        }
        break;
      }
      case Z_P: {
        int I0 = nx * subfaceOrigin0(subface);
        int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < vc; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < nx; I++) {
              T dp = CalcGradient(cData, cIndex, I / 2 + I0 / 2, J / 2 + J0 / 2, 0, 0, 0, -1);
              fData[fIndex(I, J, nz + K)] = Gradient(fData[fIndex(I, J, nz - 1 + K)], dp);
            }
          }
        }
        break;
      }
      default:
        break;
    }
  }

  /// C2F send kernel: pack coarse-cell gradient increments into the buffer.
  void copyToCommBufferC2F_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* cData, Index3DS cIndex, T* buffer) {
    int ii = 0;  // NOTE(review): unused
    switch (face) {
      case X_M: {
        int J0 = ny * subfaceOrigin0(subface);
        int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < vc; I++) {
              int m = I + vc * (J + ny * K);
              buffer[m] = CalcGradient(cData, cIndex, 0, J / 2 + J0 / 2, K / 2 + K0 / 2, -1, 0, 0);
            }
          }
        }
        break;
      }
      case X_P: {
        int J0 = ny * subfaceOrigin0(subface);
        int K0 = nz * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < vc; I++) {
              int m = I + vc * (J + ny * K);
              buffer[m] = CalcGradient(cData, cIndex, nx - 1, J / 2 + J0 / 2, K / 2 + K0 / 2, 1, 0, 0);
            }
          }
        }
        break;
      }
      case Y_M: {
        int K0 = nz * subfaceOrigin0(subface);
        int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < vc; J++) {
            for (int I = 0; I < nx; I++) {
              int m = I + nx * (J + vc * K);
              buffer[m] = CalcGradient(cData, cIndex, I / 2 + I0 / 2, 0, K / 2 + K0 / 2, 0, -1, 0);
            }
          }
        }
        break;
      }
      case Y_P: {
        int K0 = nz * subfaceOrigin0(subface);
        int I0 = nx * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < nz; K++) {
          for (int J = 0; J < vc; J++) {
            for (int I = 0; I < nx; I++) {
              int m = I + nx * (J + vc * K);
              buffer[m] = CalcGradient(cData, cIndex, I / 2 + I0 / 2, ny - 1, K / 2 + K0 / 2, 0, 1, 0);
            }
          }
        }
        break;
      }
      case Z_M: {
        int I0 = nx * subfaceOrigin0(subface);
        int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < vc; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < nx; I++) {
              int m = I + nx * (J + ny * K);
              buffer[m] = CalcGradient(cData, cIndex, I / 2 + I0 / 2, J / 2 + J0 / 2, 0, 0, 0, -1);
            }
          }
        }
        break;
      }
      case Z_P: {
        int I0 = nx * subfaceOrigin0(subface);
        int J0 = ny * subfaceOrigin1(subface);
#pragma omp parallel for collapse(3)
        for (int K = 0; K < vc; K++) {
          for (int J = 0; J < ny; J++) {
            for (int I = 0; I < nx; I++) {
              int m = I + nx * (J + ny * K);
              buffer[m] = CalcGradient(cData, cIndex, I / 2 + I0 / 2, J / 2 + J0 / 2, nz - 1, 0, 0, 1);
            }
          }
        }
        break;
      }
      default:
        break;
    }
  }

  /// F2C send kernel: pack averaged (coarsened) values into the buffer.
  void copyToCommBufferF2C_0(int nx, int ny, int nz, int vc,
                             Face face, Subface subface,
                             const T* fData, Index3DS fIndex, T* buffer) {
    int ii = 0;  // NOTE(review): unused
    switch (face) {
      case X_M: {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz / 2; k++) {
          for (int j = 0; j < ny / 2; j++) {
            for (int i = 0; i < vc; i++) {
              int m = i + vc * (j + ny / 2 * k);
              buffer[m] = interpolateF2C(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case X_P: {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz / 2; k++) {
          for (int j = 0; j < ny / 2; j++) {
            for (int i = 0; i < vc; i++) {
              int m = i + vc * (j + ny / 2 * k);
              buffer[m] = interpolateF2C(fData, fIndex, i + nx / 2 - vc, j, k);
            }
          }
        }
        break;
      }
      case Y_M: {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz / 2; k++) {
          for (int j = 0; j < vc; j++) {
            for (int i = 0; i < nx / 2; i++) {
              int m = i + nx / 2 * (j + vc * k);
              buffer[m] = interpolateF2C(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case Y_P: {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < nz / 2; k++) {
          for (int j = 0; j < vc; j++) {
            for (int i = 0; i < nx / 2; i++) {
              int m = i + nx / 2 * (j + vc * k);
              buffer[m] = interpolateF2C(fData, fIndex, i, j + ny / 2 - vc, k);
            }
          }
        }
        break;
      }
      case Z_M: {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < vc; k++) {
          for (int j = 0; j < ny / 2; j++) {
            for (int i = 0; i < nx / 2; i++) {
              int m = i + nx / 2 * (j + ny / 2 * k);
              buffer[m] = interpolateF2C(fData, fIndex, i, j, k);
            }
          }
        }
        break;
      }
      case Z_P: {
#pragma omp parallel for collapse(3)
        for (int k = 0; k < vc; k++) {
          for (int j = 0; j < ny / 2; j++) {
            for (int i = 0; i < nx / 2; i++) {
              int m = i + nx / 2 * (j + ny / 2 * k);
              buffer[m] = interpolateF2C(fData, fIndex, i, j, k + nz / 2 - vc);
            }
          }
        }
        break;
      }
      default:
        break;
    }
  }

};

#ifdef BCMT_NAMESPACE
} // namespace BCMT_NAMESPACE
#endif

#endif // SCALAR_3D_UPDATER5_H
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; 
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
  class TemplateParameterDepthRAII {
    unsigned &Depth;
    unsigned AddedLevels;

  public:
    explicit TemplateParameterDepthRAII(unsigned &Depth)
        : Depth(Depth), AddedLevels(0) {}

    // Undo every level added through this object.
    ~TemplateParameterDepthRAII() { Depth -= AddedLevels; }

    void operator++() {
      ++Depth;
      ++AddedLevels;
    }
    void addDepth(unsigned D) {
      Depth += D;
      AddedLevels += D;
    }
    // Replace the levels added so far with exactly D levels.
    void setAddedDepth(unsigned D) {
      Depth = Depth - AddedLevels + D;
      AddedLevels = D;
    }

    unsigned getDepth() const { return Depth; }
    unsigned getOriginalDepth() const { return Depth - AddedLevels; }
  };

  /// Factory object for creating ParsedAttr objects.
  AttributeFactory AttrFactory;

  /// Gathers and cleans up TemplateIdAnnotations when parsing of a
  /// top-level declaration is finished.
  SmallVector<TemplateIdAnnotation *, 16> TemplateIds;

  // Destroy cached template-ids once no pending annotation tokens can still
  // reference them (at EOF, or when the preprocessor has none outstanding).
  void MaybeDestroyTemplateIds() {
    if (!TemplateIds.empty() &&
        (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
      DestroyTemplateIds();
  }
  void DestroyTemplateIds();

  /// RAII object to destroy TemplateIdAnnotations where possible, from a
  /// likely-good position during parsing.
  struct DestroyTemplateIdAnnotationsRAIIObj {
    Parser &Self;

    DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
    ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
  };

  /// Identifiers which have been declared within a tentative parse.
  SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;

  /// Tracker for '<' tokens that might have been intended to be treated as an
  /// angle bracket instead of a less-than comparison.
  ///
  /// This happens when the user intends to form a template-id, but typoes the
  /// template-name or forgets a 'template' keyword for a dependent template
  /// name.
  ///
  /// We track these locations from the point where we see a '<' with a
  /// name-like expression on its left until we see a '>' or '>>' that might
  /// match it.
  struct AngleBracketTracker {
    /// Flags used to rank candidate template names when there is more than one
    /// '<' in a scope.
    enum Priority : unsigned short {
      /// A non-dependent name that is a potential typo for a template name.
      PotentialTypo = 0x0,
      /// A dependent name that might instantiate to a template-name.
      DependentName = 0x2,

      /// A space appears before the '<' token.
      SpaceBeforeLess = 0x0,
      /// No space before the '<' token
      NoSpaceBeforeLess = 0x1,

      LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
    };

    struct Loc {
      Expr *TemplateName;
      SourceLocation LessLoc;
      AngleBracketTracker::Priority Priority;
      unsigned short ParenCount, BracketCount, BraceCount;

      // A location is "active" while the parser sits at the same bracket
      // nesting level at which the '<' was seen.
      bool isActive(Parser &P) const {
        return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
               P.BraceCount == BraceCount;
      }

      bool isActiveOrNested(Parser &P) const {
        return isActive(P) || P.ParenCount > ParenCount ||
               P.BracketCount > BracketCount || P.BraceCount > BraceCount;
      }
    };

    SmallVector<Loc, 8> Locs;

    /// Add an expression that might have been intended to be a template name.
    /// In the case of ambiguity, we arbitrarily select the innermost such
    /// expression, for example in 'foo < bar < baz', 'bar' is the current
    /// candidate. No attempt is made to track that 'foo' is also a candidate
    /// for the case where we see a second suspicious '>' token.
    void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                        P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to
    /// be a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;

  IdentifierInfo *getSEHExceptKeyword();

  /// True if we are within an Objective-C container while parsing C-like decls.
  ///
  /// This is necessary because Sema thinks we have left the container
  /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
  /// be NULL.
  bool ParsingInObjCContainer;

  /// Whether to skip parsing of function bodies.
  ///
  /// This option can be used, for example, to speed up searches for
  /// declarations/definitions when indexing.
  bool SkipFunctionBodies;

  /// The location of the expression statement that is being parsed right now.
  /// Used to determine if an expression that is being parsed is a statement or
  /// just a regular sub-expression.
  SourceLocation ExprStatementTokLoc;

  /// Flags describing a context in which we're parsing a statement.
  enum class ParsedStmtContext {
    /// This context permits declarations in language modes where declarations
    /// are not statements.
    AllowDeclarationsInC = 0x1,
    /// This context permits standalone OpenMP directives.
    AllowStandaloneOpenMPDirectives = 0x2,
    /// This context is at the top level of a GNU statement expression.
    InStmtExpr = 0x4,

    /// The context of a regular substatement.
    SubStmt = 0,
    /// The context of a compound-statement.
    Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

    LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
  };

  /// Act on an expression statement that might be the last statement in a
  /// GNU statement expression. Checks whether we are actually at the end of
  /// a statement expression and builds a suitable expression statement.
  StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);

public:
  Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
  ~Parser() override;

  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }

  // Thin forwarder to Sema's Microsoft mangling-number counter.
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

  // Type forwarding. All of these are statically 'void*', but they may all be
  // different actual classes based on the actions in place.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;

  typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

  typedef Sema::FullExprArg FullExprArg;

  // Parsing methods.

  /// Initialize - Warm up the parser.
  ///
  void Initialize();

  /// Parse the first top-level declaration in a translation unit.
  bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);

  /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
  /// the EOF was encountered.
  bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
  // Convenience overload that discards the parsed declaration group.
  bool ParseTopLevelDecl() {
    DeclGroupPtrTy Result;
    return ParseTopLevelDecl(Result);
  }

  /// ConsumeToken - Consume the current 'peek token' and lex the next one.
  /// This does not work with special tokens: string literals, code completion,
  /// annotation tokens and balanced tokens must be handled using the specific
  /// consume methods.
  /// Returns the location of the consumed token.
  SourceLocation ConsumeToken() {
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  // Consume the current token only if it is of kind \p Expected; returns true
  // if the token was consumed.
  bool TryConsumeToken(tok::TokenKind Expected) {
    if (Tok.isNot(Expected))
      return false;
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return true;
  }

  // As above, but also reports the consumed token's location in \p Loc.
  bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
    if (!TryConsumeToken(Expected))
      return false;
    Loc = PrevTokLocation;
    return true;
  }

  /// ConsumeAnyToken - Dispatch to the right Consume* method based on the
  /// current token type. This should only be used in cases where the type of
  /// the token really isn't known, e.g. in error recovery.
  SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
    if (isTokenParen())
      return ConsumeParen();
    if (isTokenBracket())
      return ConsumeBracket();
    if (isTokenBrace())
      return ConsumeBrace();
    if (isTokenStringLiteral())
      return ConsumeStringToken();
    if (Tok.is(tok::code_completion))
      return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                      : handleUnexpectedCodeCompletionToken();
    if (Tok.isAnnotation())
      return ConsumeAnnotationToken();
    return ConsumeToken();
  }

  SourceLocation getEndOfPreviousToken() {
    return PP.getLocForEndOfToken(PrevTokLocation);
  }

  /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
  /// to the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
    return Actions.getNullabilityKeyword(nullability);
  }

private:
  //===--------------------------------------------------------------------===//
  // Low-Level token peeking and consumption methods.
  //

  /// isTokenParen - Return true if the cur token is '(' or ')'.
  bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); }
  /// isTokenBracket - Return true if the cur token is '[' or ']'.
  bool isTokenBracket() const {
    return Tok.isOneOf(tok::l_square, tok::r_square);
  }
  /// isTokenBrace - Return true if the cur token is '{' or '}'.
  bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); }
  /// isTokenStringLiteral - True if this token is a string-literal.
  bool isTokenStringLiteral() const {
    return tok::isStringLiteral(Tok.getKind());
  }
  /// isTokenSpecial - True if this token requires special consumption methods.
  bool isTokenSpecial() const {
    return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
           isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
  }

  /// Returns true if the current token is '=' or is a type of '='.
  /// For typos, give a fixit to '='
  bool isTokenEqualOrEqualTypo();

  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    // Push the current token back behind \p Consumed, then re-lex so that
    // Consumed becomes the current token and Next follows it.
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    // Annotation tokens cover a source range; record the end as "previous".
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      AngleBrackets.clear(*this);
      --ParenCount; // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount; // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount; // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind. This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched specific position in the grammar, provide code-completion results
  /// based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// Checks if the \p Level is valid for use in a fold expression.
  bool isFoldOperator(prec::Level Level) const;

  /// Checks if the \p Kind is a valid operator for fold expressions.
  bool isFoldOperator(tok::TokenKind Kind) const;

  /// Initialize all pragma handlers.
  void initializePragmaHandlers();

  /// Destroy and reset all pragma handlers.
  void resetPragmaHandlers();

  /// Handle the annotation token produced for #pragma unused(...)
  void HandlePragmaUnused();

  /// Handle the annotation token produced for
  /// #pragma GCC visibility...
  void HandlePragmaVisibility();

  /// Handle the annotation token produced for
  /// #pragma pack...
  void HandlePragmaPack();

  /// Handle the annotation token produced for
  /// #pragma ms_struct...
  void HandlePragmaMSStruct();

  /// Handle the annotation token produced for
  /// #pragma comment...
  void HandlePragmaMSComment();

  // Handlers for Microsoft-specific pragmas.
  void HandlePragmaMSPointersToMembers();

  void HandlePragmaMSVtorDisp();

  void HandlePragmaMSPragma();
  bool HandlePragmaMSSection(StringRef PragmaName,
                             SourceLocation PragmaLocation);
  bool HandlePragmaMSSegment(StringRef PragmaName,
                             SourceLocation PragmaLocation);
  bool HandlePragmaMSInitSeg(StringRef PragmaName,
                             SourceLocation PragmaLocation);

  /// Handle the annotation token produced for
  /// #pragma align...
  void HandlePragmaAlign();

  /// Handle the annotation token produced for
  /// #pragma clang __debug dump...
  void HandlePragmaDump();

  /// Handle the annotation token produced for
  /// #pragma weak id...
  void HandlePragmaWeak();

  /// Handle the annotation token produced for
  /// #pragma weak id = id...
  void HandlePragmaWeakAlias();

  /// Handle the annotation token produced for
  /// #pragma redefine_extname...
  void HandlePragmaRedefineExtname();

  /// Handle the annotation token produced for
  /// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
static TypeResult getTypeAnnotation(const Token &Tok) { if (!Tok.getAnnotationValue()) return TypeError(); return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, TypeResult T) { assert((T.isInvalid() || T.get()) && "produced a valid-but-null type annotation?"); Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. 
  bool TryAnnotateTypeOrScopeToken();
  bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                                 bool IsNewScope);
  bool TryAnnotateCXXScopeToken(bool EnteringContext = false);

  // Quick syntactic filter: true if the current token could begin a C++
  // nested-name-specifier.
  bool MightBeCXXScopeToken() {
    return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
           (Tok.is(tok::annot_template_id) &&
            NextToken().is(tok::coloncolon)) ||
           Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
  }
  bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
    return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
  }

private:
  enum AnnotatedNameKind {
    /// Annotation has failed and emitted an error.
    ANK_Error,
    /// The identifier is a tentatively-declared name.
    ANK_TentativeDecl,
    /// The identifier is a template name. FIXME: Add an annotation for that.
    ANK_TemplateName,
    /// The identifier can't be resolved.
    ANK_Unresolved,
    /// Annotation was successful.
    ANK_Success
  };

  AnnotatedNameKind
  TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);

  /// Push a tok::annot_cxxscope token onto the token stream.
  void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

  /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
  /// replacing them with the non-context-sensitive keywords. This returns
  /// true if the token was replaced.
  bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec,
                       unsigned &DiagID, bool &isInvalid) {
    if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
      return false;

    // Fast path: bail out before the out-of-line call unless the identifier
    // is one of the context-sensitive AltiVec/ZVector keywords.
    if (Tok.getIdentifierInfo() != Ident_vector &&
        Tok.getIdentifierInfo() != Ident_bool &&
        (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
      return false;

    return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
  }

  /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
  /// identifier token, replacing it with the non-context-sensitive __vector.
  /// This returns true if the token was replaced.
  bool TryAltiVecVectorToken() {
    if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
        Tok.getIdentifierInfo() != Ident_vector)
      return false;
    return TryAltiVecVectorTokenOutOfLine();
  }

  bool TryAltiVecVectorTokenOutOfLine();
  bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                                const char *&PrevSpec, unsigned &DiagID,
                                bool &isInvalid);

  /// Returns true if the current token is the identifier 'instancetype'.
  ///
  /// Should only be used in Objective-C language modes.
  bool isObjCInstancetype() {
    assert(getLangOpts().ObjC);
    if (Tok.isAnnotation())
      return false;
    // Lazily look up and cache the 'instancetype' identifier.
    if (!Ident_instancetype)
      Ident_instancetype = PP.getIdentifierInfo("instancetype");
    return Tok.getIdentifierInfo() == Ident_instancetype;
  }

  /// TryKeywordIdentFallback - For compatibility with system headers using
  /// keywords as identifiers, attempt to convert the current token to an
  /// identifier and optionally disable the keyword for the remainder of the
  /// translation unit. This returns false if the token was not replaced,
  /// otherwise emits a diagnostic and returns true.
  bool TryKeywordIdentFallback(bool DisableKeyword);

  /// Get the TemplateIdAnnotation from the token.
  TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);

  /// TentativeParsingAction - An object that is used as a kind of "tentative
  /// parsing transaction". It gets instantiated to mark the token position and
  /// after the token consumption is done, Commit() or Revert() is called to
  /// either "commit the consumed tokens" or revert to the previously marked
  /// token position. Example:
  ///
  ///   TentativeParsingAction TPA(*this);
  ///   ConsumeToken();
  ///   ....
  ///   TPA.Revert();
  ///
  class TentativeParsingAction {
    Parser &P;
    PreferredTypeBuilder PrevPreferredType;
    Token PrevTok;
    size_t PrevTentativelyDeclaredIdentifierCount;
    unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
    bool isActive;

  public:
    explicit TentativeParsingAction(Parser& p) : P(p) {
      // Snapshot all parser state that Revert() must restore.
      PrevPreferredType = P.PreferredType;
      PrevTok = P.Tok;
      PrevTentativelyDeclaredIdentifierCount =
          P.TentativelyDeclaredIdentifiers.size();
      PrevParenCount = P.ParenCount;
      PrevBracketCount = P.BracketCount;
      PrevBraceCount = P.BraceCount;
      P.PP.EnableBacktrackAtThisPos();
      isActive = true;
    }
    void Commit() {
      assert(isActive && "Parsing action was finished!");
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.PP.CommitBacktrackedTokens();
      isActive = false;
    }
    void Revert() {
      assert(isActive && "Parsing action was finished!");
      P.PP.Backtrack();
      P.PreferredType = PrevPreferredType;
      P.Tok = PrevTok;
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.ParenCount = PrevParenCount;
      P.BracketCount = PrevBracketCount;
      P.BraceCount = PrevBraceCount;
      isActive = false;
    }
    ~TentativeParsingAction() {
      assert(!isActive && "Forgot to call Commit or Revert!");
    }
  };

  /// A TentativeParsingAction that automatically reverts in its destructor.
  /// Useful for disambiguation parses that will always be reverted.
  class RevertingTentativeParsingAction
      : private Parser::TentativeParsingAction {
  public:
    RevertingTentativeParsingAction(Parser &P)
        : Parser::TentativeParsingAction(P) {}
    ~RevertingTentativeParsingAction() { Revert(); }
  };

  class UnannotatedTentativeParsingAction;

  /// ObjCDeclContextSwitch - An object used to switch context from
  /// an objective-c decl context to its enclosing decl context and
  /// back.
  class ObjCDeclContextSwitch {
    Parser &P;
    Decl *DC;
    SaveAndRestore<bool> WithinObjCContainer;

  public:
    // Temporarily exits the Objective-C container context on construction and
    // re-enters it on destruction; no-op when not inside a container.
    explicit ObjCDeclContextSwitch(Parser &p)
        : P(p), DC(p.getObjCDeclContext()),
          WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
      if (DC)
        P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
    }
    ~ObjCDeclContextSwitch() {
      if (DC)
        P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
    }
  };

  /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
  /// input. If so, it is consumed and false is returned.
  ///
  /// If a trivial punctuator misspelling is encountered, a FixIt error
  /// diagnostic is issued and false is returned after recovery.
  ///
  /// If the input is malformed, this emits the specified diagnostic and true
  /// is returned.
  bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                        unsigned Diag = diag::err_expected,
                        StringRef DiagMsg = "");

  /// The parser expects a semicolon and, if present, will consume it.
  ///
  /// If the next token is not a semicolon, this emits the specified
  /// diagnostic, or, if there's just some closing-delimiter noise (e.g., ')'
  /// or ']') prior to the semicolon, consumes that extra token.
  bool ExpectAndConsumeSemi(unsigned DiagID);

  /// The kind of extra semi diagnostic to emit.
  enum ExtraSemiKind {
    OutsideFunction = 0,
    InsideStruct = 1,
    InstanceVariableList = 2,
    AfterMemberFunctionDefinition = 3
  };

  /// Consume any extra semi-colons until the end of the line.
  void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

  /// Return false if the next token is an identifier. An 'expected identifier'
  /// error is emitted otherwise.
  ///
  /// The parser tries to recover from the error by checking if the next token
  /// is a C++ keyword when parsing Objective-C++. Return false if the recovery
  /// was successful.
  bool expectIdentifier();

  /// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
  enum class CompoundToken {
    /// A '(' '{' beginning a statement-expression.
    StmtExprBegin,
    /// A '}' ')' ending a statement-expression.
    StmtExprEnd,
    /// A '[' '[' beginning a C++11 or C2x attribute.
    AttrBegin,
    /// A ']' ']' ending a C++11 or C2x attribute.
    AttrEnd,
    /// A '::' '*' forming a C++ pointer-to-member declaration.
    MemberPtr,
  };

  /// Check that a compound operator was written in a "sensible" way, and warn
  /// if not.
  void checkCompoundToken(SourceLocation FirstTokLoc,
                          tok::TokenKind FirstTokKind, CompoundToken Op);

public:
  //===--------------------------------------------------------------------===//
  // Scope manipulation

  /// ParseScope - Introduces a new scope for parsing. The kind of
  /// scope is determined by ScopeFlags. Objects of this type should
  /// be created on the stack to coincide with the position where the
  /// parser enters the new scope, and this object's constructor will
  /// create that new scope. Similarly, once the object is destroyed
  /// the parser will exit the scope.
  class ParseScope {
    Parser *Self;
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;

  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
        : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();

        // No scope was entered; null out Self so Exit() is a no-op.
        this->Self = nullptr;
      }
    }

    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }

    ~ParseScope() { Exit(); }
  };

  /// Introduces zero or more scopes for parsing. The scopes will all be exited
  /// when the object is destroyed.
  class MultiParseScope {
    Parser &Self;
    unsigned NumScopes = 0;

    MultiParseScope(const MultiParseScope&) = delete;

  public:
    MultiParseScope(Parser &Self) : Self(Self) {}
    void Enter(unsigned ScopeFlags) {
      Self.EnterScope(ScopeFlags);
      ++NumScopes;
    }
    void Exit() {
      // Pop every scope that was entered through this object.
      while (NumScopes) {
        Self.ExitScope();
        --NumScopes;
      }
    }
    ~MultiParseScope() {
      Exit();
    }
  };

  /// EnterScope - Start a new scope.
  void EnterScope(unsigned ScopeFlags);

  /// ExitScope - Pop a scope off the scope stack.
  void ExitScope();

  /// Re-enter the template scopes for a declaration that might be a template.
  unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
  /// RAII object used to modify the scope flags for the current scope.
  class ParseScopeFlags {
    Scope *CurScope;
    unsigned OldFlags;
    ParseScopeFlags(const ParseScopeFlags &) = delete;
    void operator=(const ParseScopeFlags &) = delete;

  public:
    ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
    ~ParseScopeFlags();
  };

  //===--------------------------------------------------------------------===//
  // Diagnostic Emission and Error recovery.

public:
  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
  DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
  // Convenience overload: diagnose at the current token.
  DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); }

private:
  void SuggestParentheses(SourceLocation Loc, unsigned DK,
                          SourceRange ParenRange);
  void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
  /// Control flags for SkipUntil functions.
  enum SkipUntilFlags {
    StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
    /// Stop skipping at specified token, but don't skip the token itself
    StopBeforeMatch = 1 << 1,
    StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
  };

  // Allow SkipUntilFlags values to be combined with '|'.
  friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                            SkipUntilFlags R) {
    return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                       static_cast<unsigned>(R));
  }

  /// SkipUntil - Read tokens until we get to the specified token, then consume
  /// it (unless StopBeforeMatch is specified). Because we cannot guarantee
  /// that the token will ever occur, this skips to the next token, or to some
  /// likely good stopping point. If Flags has StopAtSemi flag, skipping will
  /// stop at a ';' character. Balances (), [], and {} delimiter tokens while
  /// skipping.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

  /// The location of the first statement inside an else that might
  /// have a misleading indentation. If there is no
  /// MisleadingIndentationChecker on an else active, this location is invalid.
  SourceLocation MisleadingIndentationElseLoc;

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    // Each override parses the corresponding kind of cached tokens; the base
    // implementations do nothing.
    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
    virtual void ParseLexedPragmas();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;
    void ParseLexedPragmas() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
        : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    // Record a declaration the attribute applies to.
    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    // Take ownership of the cached pragma tokens.
    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes. Used by ParseGNUAttributes.
  class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) {}

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon; // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(
      Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) {}

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser *Self;

  /// Method - The method declaration.
  Decl *Method;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) {}

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

/// Returns the innermost class currently being parsed; requires a
/// non-empty ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  // Destructor pops the class unless Pop() was called explicitly first.
  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() {}

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) {}

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false) {}

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};

// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;

void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);

Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate,
                                         bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);

/// Kinds of cached token streams that may be re-parsed later.
enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer };

NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                   ParsedAttributes &AccessAttrs,
                                   ParsingDeclarator &D,
                                   const ParsedTemplateInfo &TemplateInfo,
                                   const VirtSpecifiers &VS,
                                   SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                             bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
                         bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);

/// Convenience overload: cache tokens up to a single terminator kind by
/// forwarding to the two-terminator form with T1 in both positions.
bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true);

//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.

/// ParsedAttributes bundled with the source range the attributes covered.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }

  SourceRange Range;
};

/// ParsedAttributesView bundled with the source range it covered.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}

  void clearListOnly() {
    ParsedAttributesView::clearListOnly();
    Range = SourceRange();
  }

  SourceRange Range;
};

DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                        ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
    ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr,
    AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                              ParsingDeclSpec &DS,
                                              AccessSpecifier AS);

void SkipFunctionBody();
Decl *ParseFunctionDefinition(
    ParsingDeclarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);

// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                      ParsedAttributes &prefixAttrs);

class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
    ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
    SmallVectorImpl<IdentifierLocPair> &protocolIdents,
    SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

void HelperActionsForIvarDeclarations(Decl *interfaceDecl,
                                      SourceLocation atLoc,
                                      BalancedDelimiterTracker &T,
                                      SmallVectorImpl<Decl *> &AllIvarDecls,
                                      bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                     tok::ObjCKeywordKind visibility,
                                     SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                 SmallVectorImpl<SourceLocation> &PLocs,
                                 bool WarnOnDeclarations,
                                 bool ForObjCContainer,
                                 SourceLocation &LAngleLoc,
                                 SourceLocation &EndProtoLoc,
                                 bool consumeLastToken);

/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
    ParsedType baseType, SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc, bool consumeLastToken,
    bool warnOnIncompleteProtocols);

/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
    ParsedType baseType, SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols,
    SmallVectorImpl<SourceLocation> &protocolLocs,
    SourceLocation &protocolRAngleLoc, bool consumeLastToken);

/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                  ParsedType type,
                                                  bool consumeLastToken,
                                                  SourceLocation &endLoc);

void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                              ParsedAttributes &prefixAttrs);

/// Tracks parser state while the body of an Objective-C @implementation is
/// parsed: registers itself as the parser's current implementation context
/// and collects methods whose bodies were lexed for late parsing.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false) {
    P.CurParsedObjCImpl = this;
    Finished = false;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                    ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

// Definitions for Objective-C context-sensitive keyword recognition.
enum ObjCTypeQual {
  objc_in = 0,
  objc_out,
  objc_inout,
  objc_oneway,
  objc_bycopy,
  objc_byref,
  objc_nonnull,
  objc_nullable,
  objc_null_unspecified,
  objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

bool isTokIdentifier_in() const;

ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                             ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(
    SourceLocation mLoc, tok::TokenKind mType,
    tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
    bool MethodDefinition = true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

Decl *ParseObjCMethodDefinition();

public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.

/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0,
  MaybeTypeCast,
  IsTypeCast
};

ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
    TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                unsigned &NumLineToksConsumed,
                                bool IsUnevaluated);

ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec);

/// Control what ParseCastExpression will parse.
enum CastParseKind {
  AnyCastExpr = 0,
  UnaryExprOnly,
  PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
                               bool isAddressOfOperand, bool &NotCastExpr,
                               TypeCastState isTypeCast,
                               bool isVectorLiteral = false,
                               bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
                               bool isAddressOfOperand = false,
                               TypeCastState isTypeCast = NotTypeCast,
                               bool isVectorLiteral = false,
                               bool *NotPrimaryExpression = nullptr);

/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();

/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
  // Postfix suffixes begin with '[', '(', '.', '->', '++', or '--'.
  tok::TokenKind K = Tok.getKind();
  return (K == tok::l_square || K == tok::l_paren || K == tok::period ||
          K == tok::arrow || K == tok::plusplus || K == tok::minusminus);
}

bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                         const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  // Only meaningful while a potential angle-bracket pair is being tracked.
  if (auto *Info = AngleBrackets.getCurrent(*this))
    return checkPotentialAngleBracketDelimiter(*Info, OpToken);
  return false;
}

ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();

ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                              bool &isCastExpr,
                                              ParsedType &CastTy,
                                              SourceRange &CastRange);

typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;

/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                         SmallVectorImpl<SourceLocation> &CommaLocs,
                         llvm::function_ref<void()> ExpressionStarts =
                             llvm::function_ref<void()>());

/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
                               SmallVectorImpl<SourceLocation> &CommaLocs);

/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  FoldExpr,        // Also allow fold-expression <anything>
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                bool stopIfCastExpr, bool isTypeCast,
                                ParsedType &CastTy,
                                SourceLocation &RParenLoc);

ExprResult ParseCXXAmbiguousParenExpression(
    ParenParseOption &ExprType, ParsedType &CastTy,
    BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                          SourceLocation LParenLoc,
                                          SourceLocation RParenLoc);

ExprResult ParseGenericSelectionExpression();

ExprResult ParseObjCBoolLiteral();

ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                   Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

bool areTokensAdjacent(const Token &A, const Token &B);

void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                bool EnteringContext, IdentifierInfo &II,
                                CXXScopeSpec &SS);

bool ParseOptionalCXXScopeSpecifier(
    CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors,
    bool EnteringContext, bool *MayBePseudoDestructor = nullptr,
    bool IsTypename = false, IdentifierInfo **LastII = nullptr,
    bool OnlyNamespace = false, bool InUsingDeclaration = false);

//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions

/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
  /// This appears to be a lambda-introducer, which has been fully parsed.
  Success,
  /// This is a lambda-introducer, but has not been fully parsed, and this
  /// function needs to be called again to parse it.
  Incomplete,
  /// This is definitely an Objective-C message send expression, rather than
  /// a lambda-introducer, attribute-specifier, or array designator.
  MessageSend,
  /// This is not a lambda-introducer.
  Invalid,
};

// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool ParseLambdaIntroducer(
    LambdaIntroducer &Intro,
    LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();

/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();

//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();

//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();

//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                    tok::TokenKind OpKind, CXXScopeSpec &SS,
                                    ParsedType ObjectType);

//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();

//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();

ExceptionSpecificationType tryParseExceptionSpecification(
    bool Delayed, SourceRange &SpecificationRange,
    SmallVectorImpl<ParsedType> &DynamicExceptions,
    SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
    ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens);

// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
    SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions,
    SmallVectorImpl<SourceRange> &Ranges);

//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
                                   bool MayBeFollowedByDirectInit);

//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();

//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                 Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start);

//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                        SourceLocation Loc,
                                        Sema::ConditionKind CK,
                                        ForRangeInfo *FRI = nullptr);

//===--------------------------------------------------------------------===//
// C++ Coroutines

ExprResult ParseCoyieldExpression();

//===--------------------------------------------------------------------===//
// C++ Concepts

ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);

//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.

/// ParseInitializer
///       initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
ExprResult ParseInitializer() {
  // A brace-enclosed initializer takes the dedicated path; everything else
  // is an assignment-expression.
  if (Tok.isNot(tok::l_brace))
    return ParseAssignmentExpression();
  return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
    llvm::function_ref<void(const Designation &)> CodeCompleteCB);

//===--------------------------------------------------------------------===//
// clang Expressions

ExprResult ParseBlockLiteralExpression(); // ^{...}

//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                          SourceLocation SuperLoc,
                                          ParsedType ReceiverType,
                                          Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
    SourceLocation LBracloc, SourceLocation SuperLoc,
    ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.

/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
               ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                 ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                              bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc, Sema::ConditionKind CK,
                               SourceLocation *LParenLoc = nullptr,
                               SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};

/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;

  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;

  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;

  /// The name we're looking for.
  UnqualifiedId Name;

  /// The behavior this __if_exists or __if_not_exists block
  /// should have.
  IfExistsBehavior Behavior;
};

bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            ParsedAttributes &AccessAttrs,
                                            AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks

StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks

StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements

StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers.  TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. 
Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  // A valid ColonLoc means a ':' was seen, i.e. this is a range-based for.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs,
                                SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
                                      SourceLocation &DeclEnd,
                                      ParsedAttributesWithRange &attrs,
                                      bool RequireSemi,
                                      ForRangeInit *FRI = nullptr,
                                      SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
             const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();

bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
    DeclSpec &DS,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal,
    LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
    DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
    LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(
    DeclSpec &DS, AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                DeclaratorContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                          RecordDecl *TagDecl);

void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier.  Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;

/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration or an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ // 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. 
bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// Checks if the current tokens form type-id or expression. /// It is similar to isTypeIdInParens but does not suppose that type-id /// is in parenthesis. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration or an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration or an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator or /// a constructor-style initializer, when parsing declaration statements. /// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). 
ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Determine whether we could have an enum-base. /// /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise /// only consider this to be an enum-base if the next token is a '{'. /// /// \return \c false if this cannot possibly be an enum base; \c true /// otherwise. bool isEnumBase(bool AllowSemi); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. 
bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); /// Try to skip a possibly empty sequence of 'attribute-specifier's without /// full validation of the syntactic structure of attributes. 
bool TrySkipAttributes();

public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
                         DeclaratorContext Context
                           = DeclaratorContext::TypeNameContext,
                         AccessSpecifier AS = AS_none,
                         Decl **OwnedType = nullptr,
                         ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);

/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only a second '[' makes this an attribute-specifier-seq.
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}

bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  // Diagnose only when positioned at '[[' or 'alignas'.
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}

void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't support yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                         SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
                         SourceLocation *EndLoc, IdentifierInfo *ScopeName,
                         SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);

void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    ParsedAttributes attrs(AttrFactory);
    SourceLocation endLoc;
    ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
    D.takeAttributes(attrs, endLoc);
  }
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute))
    ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
                        SourceLocation *endLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();

unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                        ParsedAttributes &Attrs, SourceLocation *EndLoc,
                        IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                        ParsedAttr::Syntax Syntax);

void MaybeParseCXX11Attributes(Declarator &D) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrs(AttrFactory);
    SourceLocation endLoc;
    ParseCXX11Attributes(attrs, &endLoc);
    D.takeAttributes(attrs, endLoc);
  }
}
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrsWithRange(AttrFactory);
    ParseCXX11Attributes(attrsWithRange, endLoc);
    attrs.takeAllFrom(attrsWithRange);
    return true;
  }
  return false;
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (standardAttributesAllowed() &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}

void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                  SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                          SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);

void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);

VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                SourceLocation AvailabilityLoc,
                                ParsedAttributes &attrs,
                                SourceLocation *endLoc,
                                IdentifierInfo *ScopeName,
                                SourceLocation ScopeLoc,
                                ParsedAttr::Syntax Syntax);

Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                        SourceLocation Loc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        ParsedAttr::Syntax Syntax);

void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                     SourceLocation ObjCBridgeRelatedLoc,
                                     ParsedAttributes &attrs,
                                     SourceLocation *endLoc,
                                     IdentifierInfo *ScopeName,
                                     SourceLocation ScopeLoc,
                                     ParsedAttr::Syntax Syntax);

void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
                                SourceLocation AttrNameLoc,
                                ParsedAttributes &Attrs,
                                SourceLocation *EndLoc,
                                IdentifierInfo *ScopeName,
                                SourceLocation ScopeLoc,
                                ParsedAttr::Syntax Syntax);

void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                      SourceLocation AttrNameLoc,
                                      ParsedAttributes &Attrs,
                                      SourceLocation *EndLoc,
                                      IdentifierInfo *ScopeName,
                                      SourceLocation ScopeLoc,
                                      ParsedAttr::Syntax Syntax);

void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs,
                               SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc,
                               ParsedAttr::Syntax Syntax);

void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);

ExprResult ParseAlignArgument(SourceLocation Start,
                              SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                             SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();

VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                        SourceLocation FriendLoc);

bool isCXX11FinalKeyword() const;

/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope;
  bool CreatedScope;
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    // CreatedScope is set before EnterScope so the destructor will exit
    // the scope even if entering the declarator scope below fails.
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};

/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
                             DirectDeclParseFunction DirectDeclParser);

// Bitmask describing which attribute syntaxes a caller accepts.
enum AttrRequirements {
  AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
  AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
  AR_GNUAttributesParsed = 1 << 1,
  AR_CXX11AttributesParsed = 1 << 2,
  AR_DeclspecAttributesParsed = 1 << 3,
  AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed |
                           AR_DeclspecAttributesParsed,
  AR_VendorAttributesParsed =
      AR_GNUAttributesParsed | AR_DeclspecAttributesParsed
};

void ParseTypeQualifierListOpt(
    DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
    bool AtomicAllowed = true, bool IdentifierRequired = false,
    Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs,
                             BalancedDelimiterTracker &Tracker,
                             bool IsAmbiguous, bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
    const Declarator &D, const DeclSpec &DS,
    llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                       SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
    Declarator &D,
    SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
    DeclaratorContext DeclaratorContext, ParsedAttributes &attrs,
    SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
    SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);

//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]

/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
  /// This is not an attribute specifier.
  CAK_NotAttributeSpecifier,
  /// This should be treated as an attribute-specifier.
  CAK_AttributeSpecifier,
  /// The next tokens are '[[', but this is not an attribute-specifier. This
  /// is ill-formed by C++11 [dcl.attr.grammar]p6.
  CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
                          bool OuterMightBeMessageSend = false);

void DiagnoseUnexpectedNamespace(NamedDecl *Context);

DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
                              SourceLocation &DeclEnd,
                              SourceLocation InlineLoc = SourceLocation());

struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc;
  SourceLocation InlineLoc;
  SourceLocation IdentLoc;
  IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;

void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
                         unsigned int index, SourceLocation &InlineLoc,
                         ParsedAttributes &attrs,
                         BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
    DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
    SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
                          SourceLocation UsingLoc,
                          SourceLocation &DeclEnd,
                          ParsedAttributes &attrs);

struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;

  void clear() {
    TypenameLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};

bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     SourceLocation UsingLoc,
                                     SourceLocation &DeclEnd,
                                     AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
    const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
    UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
    ParsedAttributes &Attrs, Decl **OwnedType = nullptr);

Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
                          SourceLocation AliasLoc, IdentifierInfo *Alias,
                          SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.

bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
                         DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
                         AccessSpecifier AS, bool EnteringContext,
                         DeclSpecContext DSC,
                         ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
                                SourceLocation AttrFixitLoc,
                                unsigned TagType,
                                Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
                                 SourceLocation AttrFixitLoc,
                                 ParsedAttributesWithRange &Attrs,
                                 unsigned TagType,
                                 Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
                                     SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
                                               VirtSpecifiers &VS,
                                               ExprResult &BitfieldSize,
                                               LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
                                                             VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
    AccessSpecifier AS, ParsedAttributes &Attr,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy
ParseCXXClassMemberDeclarationWithPragmas(AccessSpecifier &AS,
                                          ParsedAttributesWithRange &AccessAttrs,
                                          DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
                                    Decl *ThisDecl);

//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
                                  SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;

bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
                                  ParsedType ObjectType,
                                  bool ObjectHadErrors,
                                  SourceLocation TemplateKWLoc,
                                  IdentifierInfo *Name,
                                  SourceLocation NameLoc,
                                  bool EnteringContext,
                                  UnqualifiedId &Id,
                                  bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
                                ParsedType ObjectType,
                                UnqualifiedId &Result);

//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.

/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
                                          CachedTokens &Toks,
                                          SourceLocation Loc);

/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
                               llvm::omp::TraitSet Set,
                               llvm::omp::TraitSelector Selector,
                               llvm::StringMap<SourceLocation> &Seen);

/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
                               llvm::omp::TraitSet Set,
                               llvm::StringMap<SourceLocation> &Seen);

/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
                          llvm::StringMap<SourceLocation> &Seen);

/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
                             llvm::omp::TraitSet Set,
                             llvm::StringMap<SourceLocation> &Seen);

/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
                             llvm::omp::TraitSet Set,
                             llvm::StringMap<SourceLocation> &SeenSelectors);

/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
                                llvm::StringMap<SourceLocation> &SeenSets);

/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);

/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
                                       OMPTraitInfo *ParentTI);

/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if /// it is not the current token. void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind); /// Check the \p FoundKind against the \p ExpectedKind, if not issue an error /// that the "end" matching the "begin" directive of kind \p BeginKind was not /// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`. void parseOMPEndDirective(OpenMPDirectiveKind BeginKind, OpenMPDirectiveKind ExpectedKind, OpenMPDirectiveKind FoundKind, SourceLocation MatchingLoc, SourceLocation FoundLoc, bool SkipUntilOpenMPEnd); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Tries to parse cast part of OpenMP array shaping operation: /// '[' expression ']' { '[' expression ']' } ')'. 
bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. 
/// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl 
*ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); NamedDecl * ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == 
tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; class GNUAsmQualifiers { unsigned Qualifiers = AQ_unspecified; public: enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4, }; static const char *getQualifierName(AQ Qualifier); bool setAsmQualifier(AQ Qualifier); inline bool isVolatile() const { return Qualifiers & AQ_volatile; }; inline bool isInline() const { return Qualifiers & AQ_inline; }; inline bool isGoto() const { return Qualifiers & AQ_goto; } }; bool isGCCAsmStatement(const Token &TokAfterAsm) const; bool isGNUAsmQualifier(const Token &TokAfterAsm) const; GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const; bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ); }; } // end namespace clang #endif
opencl_7z_fmt_plug.c
/* * This software is Copyright (c) 2015-2017 magnum * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ /* * We've seen one single sample where we could not trust the padding check * (early rejection). To be able to crack such hashes, define this to 0. * This hits performance in some cases. */ #define TRUST_PADDING 0 #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_sevenzip; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_sevenzip); #else #include <stdint.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "misc.h" #include "common-opencl.h" #include "options.h" #include "aes.h" #include "crc32.h" #include "unicode.h" #include "dyna_salt.h" #include "lzma/LzmaDec.h" #include "lzma/Lzma2Dec.h" #include "memdbg.h" #define FORMAT_LABEL "7z-opencl" #define FORMAT_NAME "7-Zip" #define FORMAT_TAG "$7z$" #define TAG_LENGTH (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA256 AES OPENCL" #define BENCHMARK_COMMENT " (512K iterations)" #define BENCHMARK_LENGTH -1000 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define PLAINTEXT_LENGTH ((55-8)/2) // 23, rar3 uses 22 #define UNICODE_LENGTH (2 * PLAINTEXT_LENGTH) #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE sizeof(struct custom_salt*) #define SALT_ALIGN sizeof(struct custom_salt*) typedef struct { uint32_t length; uint16_t v[PLAINTEXT_LENGTH]; } sevenzip_password; typedef struct { uint32_t key[32/4]; uint32_t round; uint32_t reject; } sevenzip_hash; typedef struct { size_t length; size_t unpacksize; uint32_t iterations; //uint32_t salt_size; //uint8_t salt[16]; uint8_t data[32]; /* Last two blocks of data */ } sevenzip_salt; typedef struct { cl_uint total[2]; cl_uint state[8]; cl_uchar buffer[64]; } SHA256_CTX; typedef struct { cl_ulong t; SHA256_CTX ctx; cl_uint len; 
cl_ushort buffer[PLAINTEXT_LENGTH]; } sevenzip_state; static int *cracked; static int any_cracked; static int new_keys; static struct custom_salt { dyna_salt dsalt; size_t length; /* used in decryption */ size_t unpacksize; /* used in padding check */ size_t crc_len; /* used in CRC calculation */ int NumCyclesPower; int SaltSize; int ivSize; int type; unsigned char iv[16]; unsigned char salt[16]; unsigned int crc; unsigned char props[LZMA_PROPS_SIZE]; unsigned char data[1]; } *cur_salt; static struct fmt_tests sevenzip_tests[] = { /* CRC checks passes for this hash (4 bytes of padding) */ {"$7z$128$19$0$1122$8$a264c94f2cd72bec0000000000000000$725883103$112$108$64749c0963e20c74602379ca740165b9511204619859d1914819bc427b7e5f0f8fc67f53a0b53c114f6fcf4542a28e4a9d3914b4bc76baaa616d6a7ec9efc3f051cb330b682691193e6fa48159208329460c3025fb273232b82450645f2c12a9ea38b53a2331a1d0858813c8bf25a831", "openwall"}, /* LZMA before CRC (9 bytes of padding) */ {"$7z$1$19$0$1122$8$732b59fd26896e410000000000000000$2955316379$192$183$7544a3a7ec3eb99a33d80e57907e28fb8d0e140ec85123cf90740900429136dcc8ba0692b7e356a4d4e30062da546a66b92ec04c64c0e85b22e3c9a823abef0b57e8d7b8564760611442ecceb2ca723033766d9f7c848e5d234ca6c7863a2683f38d4605322320765938049305655f7fb0ad44d8781fec1bf7a2cb3843f269c6aca757e509577b5592b60b8977577c20aef4f990d2cb665de948004f16da9bf5507bf27b60805f16a9fcc4983208297d3affc4455ca44f9947221216f58c337f$232$5d00000100", "password"}, /* CRC checks passes for this hash (no padding) */ {"$7z$0$19$0$1122$8$d1f50227759415890000000000000000$1412385885$112$112$5e5b8b734adf52a64c541a5a5369023d7cccb78bd910c0092535dfb013a5df84ac692c5311d2e7bbdc580f5b867f7b5dd43830f7b4f37e41c7277e228fb92a6dd854a31646ad117654182253706dae0c069d3f4ce46121d52b6f20741a0bb39fc61113ce14d22f9184adafd6b5333fb1", "password"}, /* This requires LZMA (no padding) */ 
{"$7z$1$19$0$1122$8$5fdbec1569ff58060000000000000000$2465353234$112$112$58ba7606aafc7918e3db7f6e0920f410f61f01e9c1533c40850992fee4c5e5215bc6b4ea145313d0ac065b8ec5b47d9fb895bb7f97609be46107d71e219544cfd24b52c2ecd65477f72c466915dcd71b80782b1ac46678ab7f437fd9f7b8e9d9fad54281d252de2a7ae386a65fc69eda$176$5d00000100", "password"}, /* Length checks */ #if DEBUG {"$7z$128$19$0$1122$8$94fb9024fdd3e6c40000000000000000$3965424295$112$99$1127828817ff126bc45ff3c5225d9d0c5d00a52094909674e6ed3dc431546d9a672738f2fa07556340d604d2efd2901b9d2ac2c0686c25af9c520c137b16c50c54df8703fd0b0606fa721ad70aafb9c4e3b288ef49864e6034021969b4ce11e3b8e269a92090ccf593c6a0da06262116", ""}, {"$7z$128$19$0$1122$8$6fd059d516d5490f0000000000000000$460747259$112$99$af163eb5532c557efca78fbb448aa04f348cd258c94233e6669f4e5025f220274c244d4f2347a7512571d9b6015a1e1a90e281983b743da957437b33092eddb55a5bc76f3ab6c7dbabb001578d1043285f5fa791fd94dd9779b461e44cbfe869f891007335b766774ccee3813ec8cd57", "&"}, {"$7z$128$19$0$1122$8$6d4a12af68d83bfe0000000000000000$993697592$112$99$7c308faa36b667599ee4418435ab621884c5c115ee3b70be454fe99236422f4f2d5cd9c8fcfbe6b6b0805ee602ce8488a08f7ea14a4f5c0c060fc685bff187720a402b23a5cfe3c9c5a5ae07f91209031b8f9804ac10459e15a0158031f6c58e507401ec6e1e6de8f64d94201159432b", "&'"}, {"$7z$128$19$0$1122$8$7527d758a59181830000000000000000$3917710544$112$99$61a9ca9e835bd0f2dc474b34d5d89bcf8cd1bb071a984ee1dcf224174a60bcee140fcf2fde8927fe4f3f4eb4a2cc39faff73f1898ae25cc92bd02939f4317ebb173bf3b6f01eef183163ddd533ad5c076f87341bd8b86d8460c68fc390aa8df89fc4076bdfd24e157f6c07e105c07612", "&'("}, {"$7z$128$19$0$1122$8$68928bade860a2b80000000000000000$3235890186$112$99$4b685a569c3aed78d217bae9ec64fa06b614df55c1cb0d160563d87efe38813accb38dd7037f86cebc91751c2488769c7398dfefaf491c024f2d640dcb388a56404cd5ac475ba16b5f8206fa45d5923b3a0c8dd0f24460ccee0d93bea03ad58b8a8db502a55ba1775560b3d194f342f7", "&'()"}, 
{"$7z$128$19$0$1122$8$81931b9ba0b069820000000000000000$3094344848$112$99$fdbb2622143d25b13992b1467ce9edce4e3df8ca07535735b76e8abcb0791e384a1d5547483e19c3bd6e5a0742d29c403cfc8b3a003b285e80b350ea9157600eb91c49b329903de9ec9b17d1c95b0e136b579e165a6e80550464fa99830bfd9ee58fc14516b614ff9f84ec80e6880a36", "&'()*"}, {"$7z$128$19$0$1122$8$ccf696913989510d0000000000000000$1238556212$112$99$647264fbc665e73ecfe3ef7055fef0d91cb86833d6df08b2f7a3c1c89cf7cdaa09a802c8bfb2e5c6b55143a315df74d841b349fc8b43613d0f87cc90325fd56fc17ee08df7ce76cdc9cda61bd4d5632e20af3db16e921c755174f291c0aa6581844def4547380e2dd4a574435d17e1e8", "&'()*+"}, {"$7z$128$19$0$1122$8$d618bd3ec8bafd800000000000000000$1349785$112$99$6514e2e7468e6f0ed63796cfc0588ac2d75f024c4a0fa03778bd252d316d03e48a08ffcc0011725ad4f867e9a9666630dff4f352c59bcbadb94b9d0e2c42d653b80f480005ce868a0b1a075b2e00abd743de0867d69cdc8b56c7f9770537d50e6bb11eb0d2d7d8b6af5dd8ecb50ab553", "&'()*+,"}, {"$7z$128$19$0$1122$8$1c1586d191f190890000000000000000$642253888$112$99$f55cf9ab802b10a83471abe9319711ae79906cd6921365167c389470a3a8a72b0d877379daae2c24ea2258e8586f12d5036aff9ddc8e26861467b0843ffb72e4410c2be76ec111d37f875c81b244ed172f1f4765a220d830a9615787e9d07f8582146556e9c566b64897a47d18a82b36", "&'()*+,-"}, {"$7z$128$19$0$1122$8$0df03cbdbc73e22a0000000000000000$3194757927$112$99$df53e9d8b4e02cf2962ad87912021508a36910c399a7abc4a3a5423fa2184816af7172418eb4763924ec8b099b7ca95abdc6faac9aaa6e181ffa60b7e8bdb2bf576536ca69152e3b6b97302c796bbc9dec78db6ba7a4a58e68f8ee28f27dea26bd4f848dc3a3315e97e1463b5c171ce5", "&'()*+,-."}, {"$7z$128$19$0$1122$8$7785351cf9fe5dfa0000000000000000$1304801610$112$99$7b35280384726da8521fee0786ef43e0aa621394a6f015b65cbd7f1329f43c4543b8a451a0007c03a3ce3f61e639c54ede3e580600b113777822b6d562390d14ed236e5bac3d3af63ae23015148a95e7ccbc9eea653b52c606ca09ec51fd2b0c4cfc2b760fccc1fe0ccdd9ee3fcb8129", "&'()*+,-./"}, 
{"$7z$128$19$0$1122$8$70eb7f4b821cf5310000000000000000$3381356868$112$99$c26db2cb89df1237f323d92044726d03cfc7ba83115e789243c3b2570ae674d8356a23e004b103638b1ea9fe6ff5db844a1ddcaaed8a71a8d8e343f73868b4acafd34d493345439b0e0be87d2cf52eb4cceaafcff0dfaf9cf25080693ede267460320e1282b869a5f0b6c8789e769640", "&'()*+,-./0"}, {"$7z$128$19$0$1122$8$2ac0f1307794d8e10000000000000000$2871514580$112$99$4783d91fa72c377310654e961120e71ecdd27ec2e67366e83291daefcea03514ca9ecea031fcbd25c0759c1f242219e673cee093ef361664f18dacf85ca0620fd7092477ceeff7c548df0a475ce93278a564fe4ddb4ee2e4695cbe417a792e822204390ca5a530208a8ed51bc01f79e6", "&'()*+,-./01"}, {"$7z$128$19$0$1122$8$5bc4988c71cba8b70000000000000000$2815498089$112$99$0e4368dde66925e2bfac9a450291f8f817beaa891f08c4d2735d20b3147df581e2f3c53abfe2b0971186ac39280eb354ca5989f9043ad0288302d0ac59a3c8fa99d26c9619b81d22996f24eec1dba361afdd5e50060c2599a40a00c83c4ee0bc4ebe6e3126a64a743af95d9b22ee5867", "&'()*+,-./012"}, {"$7z$128$19$0$1122$8$33ab0ad513b7d6910000000000000000$107430285$112$99$f9f1195a4210eadc5b23f046f81c8cfaec3b90d8b6b67893f10bd9bedd0d859d0695bca5ce315cecbc2910dce27e4c1a1416675d841901c8d84846360b1919ebcba91143713c6b755758d3db64d39344da18222341818220cc43f3ee3a91cbc288f1aafe377b53def310d3b83d32aee3", "&'()*+,-./0123"}, {"$7z$128$19$0$1122$8$dd490a165c1b90f90000000000000000$2897354864$112$99$51efe41b67875503acebe2e199cb542a279520b468a61ba67b54612e317a84e95879a34eaad82124798f32c19f9c0786e8faaac768da5f6b2c91e3ba9f97a03a992c18b5b9b21a5f2b67ae9daeef37ec115f44bfb8b10ac3cb7862b6c024413a2ee801aa674df05e8b56bd8654f279f5", "&'()*+,-./01234"}, {"$7z$128$19$0$1122$8$9077cb191a5969b40000000000000000$3637063426$112$99$1e74746c59bdfe6b3f3d957493c9d5b92ba358f97e19d30e20443cb2fbac0501e07a162344ac7cf7cfa727c70a2bcf52593accc5c2c070c2331863ac76da5ad2f5de374292a87c6af67ab561f9cf71ae472ed1267d481c250f5b4d82d0ec0b2b8531db1fe4637c3f4e3a08de1b9b5418", "&'()*+,-./012345"}, 
{"$7z$128$19$0$1122$8$adc090d27b0343d30000000000000000$1147570982$112$99$ac14b9dc3751cfe6c1c719ceef3d73946fff2b0f924e06cd3177883df770e5505551bcf5598277801f46584a4f41530f50007c776d2bb91fd160148042275dfe4e420ff72244409f59c687a5bb2d0fc1bb29138689094fe40bb0f22785c63c631cd05abf4f7f3c9b6832e192e103d2f1", "&'()*+,-./0123456"}, {"$7z$128$19$0$1122$8$8dee69dc35517a2a0000000000000000$87427823$112$99$ea36cf8b577a0b5f31115f8550987f05f174b347a8a6433a08c013ecd816c8ecaad163c62db9bae6c57ace3c2a6ce0b36f78ad4723328cc022906400eed55e0e3685a5e8e6b369df780ee72f3d25ccd49d7f40d013052e080723dd4c0b1c75302c884ea956e3b6fd27261eb8c49dea51", "&'()*+,-./01234567"}, {"$7z$128$19$0$1122$8$200ce603d6f355f10000000000000000$3012105149$112$99$0ae42342f52172ad921178a25df3666e34e5a217d0afb3655088806f821d374bf522c197e59b131dbc574d4c936472f59f8892f69e47724ea52ecc5dc7d3ed734c557c9698a6f01519039714c065ad25008003c93cb7f694ee07267d5fcdebab5d149d5404023a0112faec2264d33ff6", "&'()*+,-./012345678"}, {"$7z$128$19$0$1122$8$a5007fc77fa5cc0b0000000000000000$1082728565$112$99$32c404c9633e9c61b76556e169695248008c51ca8f7f0f79c4a271ac6eb1d905a2622132f2f6988f9f3f5e375c592ec63d92d7b183b5801b149595ed440b23a083633de9f1cb5b6ac3238b7523b23141e686e6cbe9d4d3a28fc6489e902c17aeff6cd4cb516bef5cd5c6def78cb88ad4", "&'()*+,-./0123456789"}, {"$7z$128$19$0$1122$8$fd531c4e580be9a60000000000000000$1843420503$112$99$704289830b1add1c8ee6fd622ecf5b8da01988580bdb52f6269cc61c21838849d3a04299eaee15e0cae0eff9f6c3c82f71e434b3aa1c0ca824b90438c1c983130218acd128d9186e5dc2d19a8db602a0382cb60dadb4641b46fe532b799d29a4b882beaa9217f48ddccc99578617f8a0", "&'()*+,-./0123456789:"}, {"$7z$128$19$0$1122$8$7f94a95f71c1b0df0000000000000000$141406606$112$99$1a510a6fda9788b4f4b2274ea929044c00b61b23946bc417ead90ad64dcc9a55378f9ab74f7d693a5dcf455c00f82f6c2a885b664f4ab10c9969026714ce2773030f1c5872ca3948cd612e21b321826c2a561104d57a3ba2055f03aa9cc264821544ec4bccc41f4ac76aab97accb8f9c", "&'()*+,-./0123456789:;"}, 
{"$7z$128$19$0$1122$8$e24e93c7a9ebde080000000000000000$718561925$112$99$580bf36388526c932c22e3227b51774b6963a9c5b96fc8e2ac70a4302864fa88f50e7c00d9a79e0bca0f07a236e51200dc23435b7680e6fa99b19d790ac093af615a972f8b232686c21279234a2582f9714c5a1a2d326084158eba3e81b4f8ad40784d84baa8ddbed19f1c6603156d2c", "&'()*+,-./0123456789:;<"}, #if PLAINTEXT_LENGTH > 23 {"$7z$128$19$0$1122$8$6fbd519735b131710000000000000000$1248418560$112$99$cc9e3c97073d7fd37f04d4e6983b386e3ac00f6292dedb0f566dccf22cdbbb55fee8669edade383e96aa0a740e2b42aa7fddbe5831cac10828c624ee03a1a256c6e777c3d714c55296cb815c509a252b9426fe8d4566c944efe3fac5ea94910e55a390aef2c729a031e832c406049810", "&'()*+,-./0123456789:;<="}, {"$7z$128$19$0$1122$8$3ce1b899fc03d9c30000000000000000$1452122600$112$99$d4be60d5ab390713c7189f0dd808227c01f15f71fcf4bbccce6cb9238d6418c115eff59784d96ff8944575710a5799c7bcb761e8f1bfb7646a0e8fac3728ba4cca44fb82e5dd9f87bb26828566af64374b512fa094d35af8d743bded88b6257ec98a99b50dd225d4608b283bf035ac08", "&'()*+,-./0123456789:;<=>"}, {"$7z$128$19$0$1122$8$656e2285aabed25b0000000000000000$3885982465$112$99$77f2871e556e7f5278a9e896e91cd386ca8935128957d31fdce0603ea0e71c08b908a4c2d9f2d279757ced848be9482067c9d7935c88e5233aaa94a101d29908f7f015646758029d2078d25d0886bb9f0cdc0dd5136d72e90ceeea678564b199866dd8c9e5fe927102ee2dcf1cd4167f", "&'()*+,-./0123456789:;<=>?"}, {"$7z$128$19$0$1122$8$44ffefa48fa5a5b00000000000000000$1011653568$112$99$5d2504a1eb819218b9ad552e377d37e811ffccb64a554f404d982d209edfafb893b679cc881bbcbc606e67ffa055f712d7f140b554769511bc00321765830ea7c5db810fa2000ae7f4250b74aa61d881db66ae6f30e4c8e71887960c117b268d9934b8b5d52d4abdcb42b0e4ff40b805", "&'()*+,-./0123456789:;<=>?@"}, {"$7z$128$19$0$1122$8$b6e089dd0c52b6b80000000000000000$1229766981$112$99$49a8334d64d9cc7d710fe3b9c35f5d7cb0ec44d5db8a90966fbee93f85fdeeeca859c55519addb20c4628c9204dd24d1169b34dc53a2a685440fae7ed6748c172a8e9dcc42c8dffe60196818ad17a6f9314fcfd4d97cab3c18cf279df344e00fd04eaff32f29cbfcdb6832cfb69fe351", 
"&'()*+,-./0123456789:;<=>?@A"},
#endif /* PLAINTEXT_LENGTH > 23 */
#endif /* DEBUG */
	{NULL}
};

/* Host buffers and OpenCL objects shared by the whole format */
static sevenzip_password *inbuffer;
static sevenzip_hash *outbuffer;
static sevenzip_salt currentsalt;
static cl_mem mem_in, mem_out, mem_salt;
static cl_kernel sevenzip_init, sevenzip_final, sevenzip_aes;

/* Sizes depend on global_work_size, so these must be macros */
#define insize (sizeof(sevenzip_password) * global_work_size)
#define outsize (sizeof(sevenzip_hash) * global_work_size)
#define statesize (sizeof(sevenzip_state) * global_work_size)
#define saltsize sizeof(sevenzip_salt)
#define cracked_size (sizeof(*cracked) * global_work_size)

static struct fmt_main *self;

/* KDF iterations performed per invocation of the split "loop" kernel */
#define HASH_LOOPS 0x4000
#define LOOP_COUNT ((1 << currentsalt.iterations) / HASH_LOOPS)
#define STEP 0
#define SEED 16

/* Index 2 (the crypt kernel) is the one the autotuner splits */
static int split_events[] = { 2, -1, -1 };

static const char *warn[] = {
	"xfer: ", ", init: ", ", crypt: ", ", final: ", ", aes: ", ", xfer: "
};

// This file contains auto-tuning routine(s). It has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */
/* Largest work-group size supported by all four kernels */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_final));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_aes));
	return s;
}

/*
 * Allocate host buffers and device memory for the given global work size
 * and bind the device buffers as kernel arguments. Called by the
 * autotuner; release_clobj() is the counterpart.
 */
static void create_clobj(size_t global_work_size, struct fmt_main *self)
{
	cl_int cl_error;

	inbuffer = (sevenzip_password*) mem_calloc(1, insize);
	outbuffer = (sevenzip_hash*) mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize,
	                        NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, saltsize,
	                          NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem salt");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, outsize,
	                   NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 0, sizeof(mem_out),
	                              &mem_out),
	               "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
	                              &mem_in),
	               "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
	                              &mem_out),
	               "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 0, sizeof(mem_in),
	                              &mem_in),
	               "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 1, sizeof(mem_salt),
	                              &mem_salt),
	               "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 2, sizeof(mem_out),
	                              &mem_out),
	               "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(sevenzip_aes, 0, sizeof(mem_salt),
	                              &mem_salt),
	               "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(sevenzip_aes, 1, sizeof(mem_out),
	                              &mem_out),
	               "Error while setting mem_out kernel argument");
}

/*
 * Free device and host buffers. 'cracked' doubles as the "buffers are
 * allocated" flag, so this is safe to call more than once.
 */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem salt");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

/* Tear down kernels and program; only if reset() ever built them */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(sevenzip_init), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(sevenzip_final), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(sevenzip_aes), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* One-time format initialization (no kernels built here; see reset()) */
static void init(struct fmt_main *_self)
{
	CRC32_t crc;

	self = _self;
	opencl_prepare_dev(gpu_id);
	/* NOTE(review): 'crc' is unused afterwards — presumably this call is
	   only here to trigger one-time CRC32 table initialization; confirm. */
	CRC32_Init(&crc);

	if (options.target_enc == UTF_8)
self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}

/*
 * Build the OpenCL program and kernels, then autotune LWS/GWS.
 * Guarded by 'autotuned' so repeated resets are cheap.
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];
		cl_int cl_error;

		snprintf(build_opts, sizeof(build_opts),
		         "-DPLAINTEXT_LENGTH=%d -DHASH_LOOPS=%d",
		         PLAINTEXT_LENGTH, HASH_LOOPS);
		opencl_init("$JOHN/kernels/7z_kernel.cl", gpu_id, build_opts);

		sevenzip_init = clCreateKernel(program[gpu_id], "sevenzip_init",
		                               &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");
		crypt_kernel = clCreateKernel(program[gpu_id], "sevenzip_loop",
		                              &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");
		sevenzip_final = clCreateKernel(program[gpu_id], "sevenzip_final",
		                                &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");
		sevenzip_aes = clCreateKernel(program[gpu_id], "sevenzip_aes",
		                              &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 2,
		                       self, create_clobj, release_clobj,
		                       sizeof(sevenzip_state), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1 << 19, 0, 15000000000ULL);
	}
}

/*
 * Validate a "$7z$..." ciphertext line field by field:
 *   $7z$ type $NumCyclesPower $saltlen $salt $ivlen $iv $crc $datalen
 *   $unpacksize $data [ $crclen $coderprops ]   (last two for type 1/2)
 * Returns 1 if the hash is well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int type, len, NumCyclesPower;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL)
		goto err;
	if (strlen(p) > 3 || !isdec(p))
		goto err;
	type = atoi(p);
	if (strlen(p) == 0 || type < 0 || type > 128) /* Compression type */
		goto err;
	if (type > 2 && type != 128) /* none, LZMA or LZMA2 */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* NumCyclesPower */
		goto err;
	if (strlen(p) > 2)
		goto err;
	if (!isdec(p))
		goto err;
	NumCyclesPower = atoi(p);
	if (NumCyclesPower > 24 || NumCyclesPower < 1)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt length */
		goto err;
	if (!isdec(p))
		goto err;
	len = atoi(p);
	if (len != 0) /* salt length, we currently only support it in CPU format */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* iv length */
		goto err;
	if (strlen(p) > 2)
		goto err;
	if (!isdec(p))
		goto err;
	len = atoi(p);
	if (len > 16) /* iv length */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* iv */
		goto err;
	if (!ishexlc(p))
		goto err;
	/* iv may be longer than ivlen, but only if padded with zeros */
	if (strlen(p) / 2 > len && strcmp(p+len*2, "0000000000000000"))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* crc */
		goto err;
	if (!isdecu(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* data length */
		goto err;
	if (!isdec(p))
		goto err;
	len = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* unpacksize */
		goto err;
	if (!isdec(p)) /* no way to validate, other than atoi() works for it */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* data */
		goto err;
	if (strlen(p) / 2 != len) /* validates data_len atoi() */
		goto err;
	if (!ishexlc(p))
		goto err;
	if (type && type != 128) {
		if ((p = strtokm(NULL, "$")) == NULL) /* CRC len */
			goto err;
		if (!isdec(p))
			goto err;
		if ((p = strtokm(NULL, "$")) == NULL) /* Coder props
*/
			goto err;
		if (!ishexlc(p))
			goto err;
		/* LZMA has 5 props bytes, LZMA2 just one */
		if (type == 1 && strlen(p) != 10)
			goto err;
		else if (type == 2 && strlen(p) != 2)
			goto err;
	}

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Parse a validated ciphertext into a heap-allocated struct custom_salt
 * (dyna_salt: variable-length 'data[]' tail sized by cs.length).
 * Returns a pointer to a static slot holding the struct pointer, as
 * required by SALT_SIZE/SALT_ALIGN above.
 */
static void *get_salt(char *ciphertext)
{
	struct custom_salt cs;
	struct custom_salt *psalt;
	static void *ptr;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;

	if (!ptr)
		ptr = mem_alloc_tiny(sizeof(struct custom_salt*),
		                     sizeof(struct custom_salt*));
	memset(&cs, 0, sizeof(cs));
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	cs.type = atoi(p);
	p = strtokm(NULL, "$");
	cs.NumCyclesPower = atoi(p);
	p = strtokm(NULL, "$");
	cs.SaltSize = atoi(p);
	p = strtokm(NULL, "$"); /* salt */
	p = strtokm(NULL, "$");
	cs.ivSize = atoi(p);
	p = strtokm(NULL, "$"); /* iv */
	for (i = 0; i < cs.ivSize; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$"); /* crc */
	cs.crc = atou(p); /* unsigned function */
	p = strtokm(NULL, "$");
	cs.length = atoll(p);
	/* '- 1' accounts for the data[1] placeholder member.
	   NOTE(review): malloc() return is unchecked here, and 'i' (int) is
	   compared against the size_t 'length' below — worth tightening. */
	psalt = malloc(sizeof(struct custom_salt) + cs.length - 1);
	memcpy(psalt, &cs, sizeof(cs));
	p = strtokm(NULL, "$");
	psalt->unpacksize = atoll(p);
	p = strtokm(NULL, "$"); /* data */
	for (i = 0; i < psalt->length; i++)
		psalt->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	if (cs.type && cs.type != 128) {
		p = strtokm(NULL, "$"); /* CRC length */
		psalt->crc_len = atoi(p);
		p = strtokm(NULL, "$"); /* Coder properties */
		for (i = 0; p[i * 2] ; i++)
			psalt->props[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
				atoi16[ARCH_INDEX(p[i * 2 + 1])];
	}

	MEM_FREE(keeptr);
	/* dyna_salt bookkeeping: compare from 'length' through the data tail */
	psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, length);
	psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, length,
	                                           data, psalt->length);
	psalt->dsalt.salt_alloc_needs_free = 1;
	memcpy(ptr, &psalt, sizeof(void*));
	return ptr;
}

/* Select a salt and push the GPU-relevant parts to device memory */
static void set_salt(void *salt)
{
	cur_salt = *((struct custom_salt **)salt);

	//memcpy(currentsalt.salt, cur_salt->salt, cur_salt->SaltSize);
//currentsalt.salt_size = cur_salt->SaltSize; if (currentsalt.iterations != cur_salt->NumCyclesPower) new_keys = 1; if (cur_salt->length >= 32) memcpy(currentsalt.data, cur_salt->data + cur_salt->length - 32, 32); currentsalt.length = cur_salt->length; currentsalt.unpacksize = cur_salt->unpacksize; currentsalt.iterations = cur_salt->NumCyclesPower; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, saltsize, &currentsalt, 0, NULL, NULL), "Transfer salt to gpu"); } static void clear_keys(void) { memset(inbuffer, 0, insize); } static void sevenzip_set_key(char *key, int index) { UTF16 c_key[PLAINTEXT_LENGTH + 1]; int length = strlen(key); /* Convert password to utf-16-le format (--encoding aware) */ length = enc_to_utf16(c_key, PLAINTEXT_LENGTH, (UTF8*)key, length); if (length <= 0) length = strlen16(c_key); length *= 2; inbuffer[index].length = length; memcpy(inbuffer[index].v, c_key, length); new_keys = 1; } static char *get_key(int index) { UTF16 c_key[PLAINTEXT_LENGTH + 1]; int length = inbuffer[index].length; memcpy(c_key, inbuffer[index].v, length); c_key[length / 2] = 0; return (char*)utf16_to_enc(c_key); } static int salt_compare(const void *x, const void *y) { int c; const struct custom_salt *s1 = *((struct custom_salt**)x); const struct custom_salt *s2 = *((struct custom_salt**)y); // we had to make the salt order deterministic, so that intersalt-restore works if (s1->NumCyclesPower != s2->NumCyclesPower) return (s1->NumCyclesPower - s2->NumCyclesPower); c = memcmp(s1->salt, s2->salt, 16); if (c) return c; return memcmp(s1->iv, s2->iv, 16); } static void *SzAlloc(void *p, size_t size) { return mem_alloc(size); } static void SzFree(void *p, void *address) { MEM_FREE(address) }; static int sevenzip_decrypt(sevenzip_hash *derived) { unsigned char *out = NULL; AES_KEY akey; unsigned char iv[16]; union { unsigned char crcc[4]; unsigned int crci; } _crc_out; unsigned char *crc_out = _crc_out.crcc; unsigned int ccrc; CRC32_t crc; size_t 
crc_len = cur_salt->unpacksize;
	/* Upper bound on how much ciphertext we must decrypt to recover
	 * crc_len bytes of plaintext: LZMA expands ~160/11 at worst here,
	 * rounded up to whole AES blocks — presumably an empirical bound
	 * from the original author; TODO confirm against upstream. */
	size_t aes_len = cur_salt->crc_len ?
		(cur_salt->crc_len * 11 + 150) / 160 * 16 : crc_len;

	/*
	 * Early rejection (only decrypt last 16 bytes). We don't seem to
	 * be able to trust this, see #2532, so we only do it for truncated
	 * hashes (it's the only thing we can do!).
	 */
	if ((TRUST_PADDING || cur_salt->type == 0x80) && derived->reject)
		return 0;

	if (cur_salt->type == 0x80) /* We only have truncated data */
		return 1;

	/* Complete decryption, or partial if possible */
	aes_len = MIN(aes_len, cur_salt->length);
	out = mem_alloc(aes_len);
	memcpy(iv, cur_salt->iv, 16);
	AES_set_decrypt_key((unsigned char*)derived->key, 256, &akey);
	AES_cbc_encrypt(cur_salt->data, out, aes_len, &akey, iv, AES_DECRYPT);

	/* Optional decompression before CRC */
	if (cur_salt->type == 1) {
		/* type 1: LZMA1 stream */
		ISzAlloc st_alloc = {SzAlloc, SzFree};
		ELzmaStatus status;
		size_t in_size = aes_len;
		uint8_t *new_out;
		SRes rc;
		size_t out_size = cur_salt->crc_len;

		new_out = mem_alloc(out_size);
		if ((rc = LzmaDecode(new_out, &out_size, out, &in_size,
		                     cur_salt->props, LZMA_PROPS_SIZE,
		                     LZMA_FINISH_ANY, &status, &st_alloc)) == SZ_OK &&
		    out_size == cur_salt->crc_len) {
			/* decompression succeeded: CRC the plaintext instead */
			MEM_FREE(out);
			out = new_out;
			crc_len = cur_salt->crc_len;
		} else {
			MEM_FREE(new_out);
			goto exit_bad;
		}
	}
	else if (cur_salt->type == 2) {
		/* type 2: LZMA2 stream (single property byte) */
		Byte prop = cur_salt->props[0];
		ISzAlloc st_alloc = {SzAlloc, SzFree};
		ELzmaStatus status;
		size_t in_size = aes_len;
		uint8_t *new_out;
		SRes rc;
		size_t out_size = cur_salt->crc_len;

		new_out = mem_alloc(out_size);
		if ((rc = Lzma2Decode((Byte*)new_out, &out_size, out, &in_size,
		                      prop, LZMA_FINISH_ANY, &status, &st_alloc)) == SZ_OK &&
		    out_size == cur_salt->crc_len) {
			MEM_FREE(out);
			out = new_out;
			crc_len = cur_salt->crc_len;
		} else {
			MEM_FREE(new_out);
			goto exit_bad;
		}
	}

	/* CRC test */
	CRC32_Init(&crc);
	CRC32_Update(&crc, out, crc_len);
	CRC32_Final(crc_out, crc);
	ccrc = _crc_out.crci; /* computed CRC */
#if !ARCH_LITTLE_ENDIAN
	ccrc = JOHNSWAP(ccrc);
#endif
	if (ccrc == cur_salt->crc)
		goto exit_good;
exit_bad:
	MEM_FREE(out);
	return 0;
exit_good:
	MEM_FREE(out);
	return 1;
}

/*
 * Main work loop: run the PBKDF2 kernels on the GPU for all queued
 * candidates (only when keys changed), optionally the AES early-reject
 * kernel, then finish verification per candidate on the CPU.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	//fprintf(stderr, "%s(%d) lws %zu gws %zu\n", __FUNCTION__, count, local_work_size, global_work_size);

	/* reset results from the previous batch */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/* Key derivation is salt-independent, so skip it when nothing changed */
	if (ocl_autotune_running || new_keys) {
		int i;

		global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

		// Copy data to gpu
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");

		// Run 1st kernel
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run init kernel");

		// Run loop kernel (split so the watchdog/UI stays responsive)
		for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
			BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
			BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
			opencl_process_event();
		}

		// Run final kernel
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run final loop kernel");
	}
	new_keys = 0;

	if (TRUST_PADDING || cur_salt->type == 0x80) {
		// Run AES kernel (only for truncated hashes)
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_aes, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[4]), "Run AES kernel");
	}

	// Read the result back (blocking read doubles as a sync point)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[5]), "Copy result back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (index = 0; index < count; index++) {
			/* decrypt and check */
			if ((cracked[index] =
sevenzip_decrypt(&outbuffer[index]))) {
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
	}

	return count;
}

/* Batch-level hit test: any_cracked was set by crypt_all() */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* Full verification (AES + CRC) already ran inside sevenzip_decrypt(),
 * so a set cracked[] flag is definitive — nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Tunable cost 1: PBKDF iteration count = 2^NumCyclesPower.
 * NOTE(review): '1 <<' is int arithmetic — would overflow for
 * NumCyclesPower >= 31; presumably never that large in valid input. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;
	my_salt = *((struct custom_salt **)salt);
	return (unsigned int)(1 << my_salt->NumCyclesPower);
}

/* Tunable cost 2: bytes of AES padding (encrypted minus unpacked size) */
static unsigned int padding_size(void *salt)
{
	struct custom_salt *my_salt;
	my_salt = *((struct custom_salt **)salt);
	return my_salt->length - my_salt->unpacksize;
}

/* Tunable cost 3: coder type (0 raw, 1 LZMA, 2 LZMA2, 0x80 truncated) */
static unsigned int compression_type(void *salt)
{
	struct custom_salt *my_salt;
	my_salt = *((struct custom_salt **)salt);
	return my_salt->type;
}

/* Format descriptor wiring all the callbacks above into the JtR core */
struct fmt_main fmt_opencl_sevenzip = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_DYNA_SALT | FMT_HUGE_INPUT,
		{
			"iteration count",
			"padding size",
			"compression type",
		},
		{ FORMAT_TAG },
		sevenzip_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
			padding_size,
			compression_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		salt_compare,
		set_salt,
		sevenzip_set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
GB_binop__islt_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__islt_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__islt_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__islt_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_fp32) // A*D function (colscale): GB (_AxD__islt_fp32) // D*A function (rowscale): GB (_DxB__islt_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__islt_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__islt_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_fp32) // C=scalar+B GB (_bind1st__islt_fp32) // C=scalar+B' GB (_bind1st_tran__islt_fp32) // C=A+scalar GB (_bind2nd__islt_fp32) // C=A'+scalar GB (_bind2nd_tran__islt_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_FP32 || GxB_NO_ISLT_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__islt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = 
(float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__islt_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix 
M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__islt_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; 
p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ike_fmt_plug.c
/* PSK cracker patch for JtR. Hacked together during March of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com> . * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * and it is hereby released to the general public under GPL * * The IKE Scanner (ike-scan) is Copyright (C) 2003-2007 Roy Hills, * NTA Monitor Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library, and distribute linked combinations including the two. * * You must obey the GNU General Public License in all respects * for all of the code used other than OpenSSL. If you modify * file(s) with this exception, you may extend this exception to your * version of the file(s), but you are not obligated to do so. If you * do not wish to do so, delete this exception statement from your * version. * * If this license is unacceptable to you, I may be willing to negotiate * alternative licenses (contact ike-scan@nta-monitor.com). * * You are encouraged to send comments, improvements or suggestions to * me at ike-scan@nta-monitor.com. 
* * psk-crack.c -- IKE Aggressive Mode Pre-Shared Key cracker for ike-scan * * Author: Roy Hills * Date: 8 July 2004 * * July, 2012, JimF small changes made, many more should be done. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ike; #elif FMT_REGISTERS_H john_register_one(&fmt_ike); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "ike-crack.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 16 #endif static int omp_t = 1; #endif #include "memdbg.h" #define FORMAT_LABEL "IKE" #define FORMAT_NAME "PSK" #define ALGORITHM_NAME "HMAC MD5/SHA1 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 20 /* SHA1 */ #define BINARY_SIZE_SMALLER 16 /* MD5 */ #define SALT_SIZE sizeof(psk_entry) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(size_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 16 static struct fmt_tests ike_tests[] = { {"$ike$*0*5c7916ddf8db4d233b3b36005bb3ccc115a73807e11a897be943fd4a2d0f942624cb00588d8b3a0a26502b73e639df217ef6c4cb90f96b0a3c3ef2f62ed025b4a705df9de65e33e380c1ba5fa23bf1f9911bbf388d0844256fa0131fc5cf8acb396936ba3295b4637b039d93f58db90a3a1cf1ef5051103bacf6e1a3334f9f89*fde8c68c5f324c7dbcbadde1d757af6962c63496c009f77cad647f2997fd4295e50821453a6dc2f6279fd7fef68768584d9cee0da6e68a534a097ce206bf77ecc798310206f3f82d92d02c885794e0a430ceb2d6b43c2aff45a6e14c6558382df0692ff65c2724eef750764ee456f31424a5ebd9e115d826bbb9722111aa4e01*b2a3c7aa4be95e85*756e3fa11c1b102c*00000001000000010000002c01010001000000240101000080010001800200018003000180040002800b0001000c000400007080*01000000ac100202*251d7ace920b17cb34f9d561bca46d037b337d19*e045819a64edbf022620bff3efdb935216584cc4*b9c594fa3fca6bb30a85c4208a8df348", "abc123"}, 
{"$ike$*0*9bdee7aa341cf1a6c19bc0191106b5056537ce6b837cd70678ea5a3ccb606b56dee4548feb67f24fd6f4d5f58967a9ff3c674d9d79e4195b7def5aac147c9fe9abdc2f8ba2eca58f4c863fedc7a8c8e1ad6e1551b1e44bf9a0e258561a5db1c2ca1e8b5dfda1b012012b6fdf24ecd07da6b10d76ab3b58d07b30b4f9da26aee4*c9b7ef0610a22b3e1c88b1a01ce4d4110edf6baa122ed1285eb2184cd75d30a11520a725c2d263de5a157f77f953880732f3b14521836d7f3585cb0ce3fcadf81c541dde2680bd81953cf88e8f8096c173470694ca7414fff9df0cdcdbb9d4f70ef1d6347293b507cfad965e2d2c1fa07326353e9a493d93284970040344fb11*3506592130312567*6c362583ce7a2a26*00000001000000010000002c01010001000000240101000080010001800200028003000180040002800b0001000c000400007080*01000000ac100202*84943233f42a0b5a9b33c327162fe0efee2545e4*76f451dce3fea6402b67f3fddae561ebdb4a6efe*f63f237b3c0f1fe57a5b852203cfd27cbf0c78d4", "abc123"}, {NULL} }; static psk_entry *cur_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ptr, *ctcopy, *keeptr; if (strncmp(ciphertext, "$ike$*", 6)) return 0; if (!(ctcopy = strdup(ciphertext))) return 0; keeptr = ctcopy; ctcopy += 6; /* skip leading '$ike$*' */ if (*ctcopy != '0' && *ctcopy != '1') goto error; /* skip '*0' */ ctcopy += 1; if (*ctcopy != '*') goto error; ctcopy += 1; if (!(ptr = strtokm(ctcopy, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; 
if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (strlen(ptr) > MAXLEN)
		goto error;
	if (!ishexlc(ptr))
		goto error;
	/* last field is the HMAC digest itself: exact length check */
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (strlen(ptr) != 32 && strlen(ptr) != 40) // md5 or sha1 length.
		goto error;
	if (!ishexlc(ptr))
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Build the salt from the ciphertext: the Nortel flag plus all IKE
 * exchange parameters, parsed by load_psk_params() from ike-crack.h.
 * NOTE(review): returns a single static psk_entry — relies on the core
 * copying SALT_SIZE bytes out before the next call.
 */
static void *get_salt(char *ciphertext)
{
	static psk_entry cs;

	cs.isnortel = atoi(&ciphertext[6]);
	load_psk_params(&ciphertext[8], NULL, &cs);
	return (void *)&cs;
}

/*
 * Decode the expected HMAC digest (last '*'-field) into binary.
 * Only BINARY_SIZE_SMALLER (MD5-sized) bytes are decoded/compared;
 * buffer is BINARY_SIZE (SHA1-sized) and union-aligned.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE_SMALLER; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash lookups over the first 32 bits of the computed HMAC */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static void set_salt(void *salt)
{
	cur_salt = (psk_entry *)salt;
}

/* Compute the IKE PSK HMAC (MD5 or SHA1, per salt) for every candidate */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		compute_hash(cur_salt, saved_key[index], (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Linear scan over the first 32 bits of each computed digest */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (*((ARCH_WORD_32*)binary) == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return (*((ARCH_WORD_32*)binary) == crypt_out[index][0]);
}

/* Full comparison, but only over the MD5-sized common prefix
 * (BINARY_SIZE_SMALLER) — matches what get_binary() decoded. */
static int cmp_exact(char *source, int index)
{
	void *binary = get_binary(source);
	return !memcmp(binary, crypt_out[index], BINARY_SIZE_SMALLER);
}

/* Store a candidate, truncated to PLAINTEXT_LENGTH, NUL-terminated */
static void ike_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/*
 * For ike, the hash algorithm used for hmac
 * is returned as the first "tunable cost":
 * 1: MD5
 * 2: SHA1
 *
 * However, there is almost no difference in speed,
 * so if the different hash types for HMAC shouldn't be reported,
 * just define IKE_REPORT_TUNABLE_COSTS to be 0 instead of 1.
 */
#define IKE_REPORT_TUNABLE_COSTS 1

#if IKE_REPORT_TUNABLE_COSTS
/* Report the HMAC hash algorithm (1: MD5, 2: SHA1) as a tunable cost */
static unsigned int tunable_cost_hmac_hash_type(void *salt)
{
	psk_entry *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->hash_type;
}
#endif

/* Format descriptor wiring the callbacks above into the JtR core */
struct fmt_main fmt_ike = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_SMALLER,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
#if IKE_REPORT_TUNABLE_COSTS
			"hash algorithm used for hmac [1:MD5 2:SHA1]",
#else
			NULL
#endif
		},
		ike_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
#if IKE_REPORT_TUNABLE_COSTS
			tunable_cost_hmac_hash_type,
#else
			NULL
#endif
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		ike_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
facedetectcnn.h
/* By downloading, copying, installing or using the software you agree to this license. If you do not agree to this license, do not download, install, copy or use the software. License Agreement For libfacedetection (3-clause BSD License) Copyright (c) 2018-2020, Shiqi Yu, all rights reserved. shiqi.yu@gmail.com Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the names of the copyright holders nor the names of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall copyright holders or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. 
*/

#pragma once

#include "facedetection_export.h"

// Select at most one SIMD backend by uncommenting the matching line;
// the #error checks below reject conflicting combinations.
//#define _ENABLE_AVX512 //Please enable it if X64 CPU
//#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU

FACEDETECTION_EXPORT int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
                          unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be BGR (three channels) insteed of RGB image!

/*
DO NOT EDIT the following code if you don't really understand it.
*/
#if defined(_ENABLE_AVX512) || defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif

#if defined(_ENABLE_NEON)
#include "arm_neon.h"
//NEON does not support UINT8*INT8 dot product
//to conver the input data to range [0, 127],
//and then use INT8*INT8 dot product
#define _MAX_UINT8_VALUE 127
#else
#define _MAX_UINT8_VALUE 255
#endif

// Allocation alignment in BITS (divide by 8 for bytes); matches the
// SIMD register width of the enabled backend.
#if defined(_ENABLE_AVX512)
#define _MALLOC_ALIGN 512
#elif defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif

#if defined(_ENABLE_AVX512)&& defined(_ENABLE_NEON)
#error Cannot enable the two of AVX512 and NEON at the same time.
#endif
#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
#error Cannot enable the two of AVX and NEON at the same time.
#endif
#if defined(_ENABLE_AVX512)&& defined(_ENABLE_AVX2)
#error Cannot enable the two of AVX512 and AVX2 at the same time.
#endif

#if defined(_OPENMP)
#include <omp.h>
#endif

#include <string.h>
#include <vector>
#include <iostream>
#include <typeinfo>

using namespace std;

// Aligned allocation helpers (implemented in the .cpp file).
void* myAlloc(size_t size);
void myFree_(void* ptr);
// myFree takes the ADDRESS of a pointer: frees it and nulls it.
// NOTE(review): the trailing ';' in the expansion makes "myFree(&p);"
// expand to two statements -- existing call sites rely on this form.
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);

#ifndef MIN
#  define MIN(a,b)  ((a) > (b) ? (b) : (a))
#endif

#ifndef MAX
#  define MAX(a,b)  ((a) < (b) ? (b) : (a))
#endif

// One detected face: confidence score, bounding box, and 5 landmark
// points stored as (x,y) pairs in lm[10].
typedef struct FaceRect_
{
    float score;
    int x;
    int y;
    int w;
    int h;
    int lm[10];
}FaceRect;

// Parameters and quantized weights for one convolution layer.
typedef struct ConvInfoStruct_
{
    int pad;
    int stride;
    int kernel_size;
    int channels;
    int num;
    float scale;          // dequantization scale for pWeights
    signed char* pWeights; // int8 filter weights
    signed int* pBias;     // int32 per-output-channel bias
}ConvInfoStruct;

// A 3-D data blob (width x height x channels) with each pixel's channel
// vector padded so that channelStep (bytes per pixel) is a multiple of
// _MALLOC_ALIGN/8, allowing aligned SIMD loads.
template <class T>
class CDataBlob
{
public:
    T * data;        // owned buffer, released via myFree in setNULL()
    int width;
    int height;
    int channels;
    int channelStep; // bytes per pixel (channels * sizeof(T), rounded up for alignment)
    float scale;
    //when the datablob is a filter, the bias is 0 by default
    //if it is the filted data, the bias is 1 by default
    int bias;
public:
    CDataBlob() {
        data = 0;
        width = 0;
        height = 0;
        channels = 0;
        channelStep = 0;
        scale = 1.0f;
        bias = 0;
    }
    // NOTE(review): the result of create() is not checked here; data may
    // be NULL after construction if allocation failed -- callers should
    // verify before use.
    CDataBlob(int w, int h, int c)
    {
        data = 0;
        create(w, h, c);
    }
    ~CDataBlob()
    {
        setNULL();
    }

    // Release the buffer (if any) and reset dimensions; scale returns
    // to 1.0f but bias is intentionally left untouched here.
    void setNULL()
    {
        if (data)
            myFree(&data);
        width = height = channels = channelStep = 0;
        scale = 1.0f;
    }
    // Allocate a w x h x c blob; only the alignment-padding channels of
    // each pixel are zeroed (the payload channels are left uninitialized).
    // Returns false if allocation failed.
    bool create(int w, int h, int c)
    {
        setNULL();
        width = w;
        height = h;
        channels = c;
        bias = 0;

        //alloc space for int8 array
        // round channels*sizeof(T) up to a multiple of _MALLOC_ALIGN/8 bytes
        int remBytes = (sizeof(T)* channels) % (_MALLOC_ALIGN / 8);
        if (remBytes == 0)
            this->channelStep = channels * sizeof(T);
        else
            this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes;
        data = (T*)myAlloc(size_t(width) * height * this->channelStep);

        if (data == NULL)
        {
            cerr << "Failed to alloc memeory for uint8 data blob: " << width << "*" << height << "*" << channels << endl;
            return false;
        }

        //memset(data, 0, width * height * channelStep);
        //the following code is faster than memset
        //but not only the padding bytes are set to zero.
        //BE CAREFUL!!!
        //#if defined(_OPENMP)
        //#pragma omp parallel for
        //#endif
        // NOTE(review): the inner loop variable 'c' shadows the parameter
        // 'c' (channel count) -- intentional-looking but easy to misread.
        for (int r = 0; r < this->height; r++)
        {
            for (int c = 0; c < this->width; c++)
            {
                int pixel_end = this->channelStep / sizeof(T);
                T * pI = (this->data + (size_t(r) * this->width + c) * this->channelStep /sizeof(T));
                // zero only the padding elements [channels, pixel_end)
                for (int ch = this->channels; ch < pixel_end; ch++)
                    pI[ch] = 0;
            }
        }
        return true;
    }

    // Fill an int8 filter blob from pData laid out channel-major
    // (pData[ch][row][col]) into this blob's pixel-major layout, and
    // record the filter's bias flag.  Dimensions must already match.
    bool setInt8FilterData(signed char * pData, int bias, int dataWidth, int dataHeight, int dataChannels)
    {
        if (pData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (typeid(signed char) != typeid(T))
        {
            cerr << "Data must be signed char, the same with the source data." << endl;
            return false;
        }
        if (dataWidth != this->width || dataHeight != this->height || dataChannels != this->channels)
        {
            cerr << "The dimension of the data can not match that of the Blob." << endl;
            return false;
        }

        for(int row = 0; row < height; row++)
            for (int col = 0; col < width; col++)
            {
                T * p = (this->data + (size_t(width) * row + col) * channelStep /sizeof(T));
                for (int ch = 0; ch < channels; ch++)
                {
                    p[ch] = pData[ch * height * width + row * width + col];
                }
            }
        this->bias = bias;
        return true;
    }

    // Rewrite a BGR image as a half-resolution, 27-channel blob: each
    // output pixel holds the 3x3 neighborhood (stride 2, pad 1) of the
    // source, so a 3x3-stride-2 convolution becomes a 1x1-stride-1 one.
    // Under NEON, pixel values are halved into [0,127] (see
    // _MAX_UINT8_VALUE above) and scale is set to 0.5 to compensate.
    bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep)
    {
        if (imgData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (typeid(unsigned char) != typeid(T))
        {
            cerr << "Data must be unsigned char, the same with the source data." << endl;
            return false;
        }
        if (imgChannels != 3)
        {
            cerr << "The input image must be a 3-channel RGB image." << endl;
            return false;
        }
        // NOTE(review): the result of create() is not checked; if the
        // allocation fails the memset below dereferences NULL -- confirm
        // upstream guarantees or add a check in the .cpp.
        create((imgWidth+1)/2, (imgHeight+1)/2, 27);
        //since the pixel assignment cannot fill all the elements in the blob.
        //some elements in the blob should be initialized to 0
        memset(data, 0, size_t(width) * height * channelStep);

#if defined(_OPENMP)
#pragma omp parallel for
#endif
        for (int r = 0; r < this->height; r++)
        {
            for (int c = 0; c < this->width; c++)
            {
                // T is unsigned char here, so channelStep is used directly as a byte offset
                T * pData = (unsigned char*)this->data + (size_t(r) * this->width + c) * this->channelStep;
                for (int fy = -1; fy <= 1; fy++)
                {
                    int srcy = r * 2 + fy;
                    if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
                        continue;
                    for (int fx = -1; fx <= 1; fx++)
                    {
                        int srcx = c * 2 + fx;
                        if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
                            continue;
                        const unsigned char * pImgData = imgData + size_t(imgWidthStep) * srcy + imgChannels * srcx;
                        int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
#if defined(_ENABLE_NEON)
                        pData[output_channel_offset] = (pImgData[0] / 2);
                        pData[output_channel_offset + 1] = (pImgData[1] / 2);
                        pData[output_channel_offset + 2] = (pImgData[2] / 2);
#else
                        pData[output_channel_offset] = (pImgData[0]);
                        pData[output_channel_offset+1] = (pImgData[1]);
                        pData[output_channel_offset+2] = (pImgData[2]);
#endif
                    }
                }
            }
        }
#if defined(_ENABLE_NEON)
        this->bias = 1; // 1/2 = 0
        this->scale = 0.5f;
#else
        this->bias = 1;
        this->scale = 1.0f;
#endif
        return true;
    }

    // Bounds-checked element access; returns 0 for any out-of-range
    // coordinate or when the blob is empty.
    T getElement(int x, int y, int channel)
    {
        if (this->data)
        {
            if (x >= 0 && x < this->width &&
                y >= 0 && y < this->height &&
                channel >= 0 && channel < this->channels)
            {
                T * p = this->data + (size_t(y) * this->width + x) * this->channelStep/sizeof(T);
                return (p[channel]);
            }
        }
        return (T)(0);
    }

    // Debug dump of dimensions and contents.  NOTE(review): the header
    // string names four fields but five values (incl. bias) are printed.
    friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
    {
        output << "DataBlob Size (Width, Height, Channel, scale) = ("
            << dataBlob.width
            << ", " << dataBlob.height
            << ", " << dataBlob.channels
            << ", " << dataBlob.scale
            << ", " << dataBlob.bias
            << ")" << endl;
        for (int ch = 0; ch < dataBlob.channels; ch++)
        {
            output << "Channel " << ch << ": " << endl;
            for (int row = 0; row < dataBlob.height; row++)
            {
                output << "(";
                for (int col = 0; col < dataBlob.width; col++)
                {
                    T * p = (dataBlob.data + (dataBlob.width * row + col) * dataBlob.channelStep /sizeof(T) );
                    // print small integer types as numbers, not characters
                    if(sizeof(T)<4)
                        output << (int)(p[ch]);
                    else
                        output << p[ch];

                    if (col != dataBlob.width - 1)
                        output << ", ";
                }
                output << ")" << endl;
            }
        }
        return output;
    }
};

// A set of int8 filter blobs plus the layer's convolution parameters.
// Owns the CDataBlob pointers in 'filters' and deletes them on destruction.
class Filters {
public:
    vector<CDataBlob<signed char> *> filters;
    int pad;
    int stride;
    float scale; //element * scale = original value
    Filters() {
        pad = 0;
        stride = 0;
        scale = 0;
    }
    // NOTE(review): 'int i' vs. size() (size_t) triggers a sign-compare
    // warning; harmless for realistic filter counts.
    ~Filters() {
        for (int i = 0; i < filters.size(); i++)
        {
            delete filters[i];
            filters[i] = 0;
        }
    }
};

// --- CNN primitives implemented in the .cpp files ---

// Convert an int32 blob to float (applying scale/bias conventions).
bool convertInt2Float(CDataBlob<int> * inputData, CDataBlob<float> * outputData);

// Quantized convolution; the _relu variant fuses a ReLU and re-quantizes
// back to uint8.
bool convolution(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<int> *outputData);
bool convolution_relu(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<unsigned char> *outputData);

// 2x2 stride-2 max pooling.
bool maxpooling2x2S2(const CDataBlob<unsigned char> *inputData, CDataBlob<unsigned char> *outputData);

// Generate SSD-style prior boxes for a feature map.
bool priorbox(const CDataBlob<unsigned char> * featureData, int img_width, int img_height, int step, int num_sizes, float * pWinSizes, CDataBlob<float> * outputData);

// Concatenate four blobs along the channel dimension.
template<typename T>
bool concat4(const CDataBlob<T> *inputData1, const CDataBlob<T> *inputData2, const CDataBlob<T> *inputData3, const CDataBlob<T> *inputData4, CDataBlob<T> *outputData);

/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
template<typename T>
bool blob2vector(const CDataBlob<T> * inputData, CDataBlob<T> * outputData);

// In-place 2-class softmax over a 1x1 multi-channel blob.
bool softmax1vector2class(CDataBlob<float> *inputOutputData);

// NMS + thresholding of (priorbox, loc, conf) into final detections.
bool detection_output(const CDataBlob<float> * priorbox,
                      const CDataBlob<float> * loc,
                      const CDataBlob<float> * conf,
                      float overlap_threshold,
                      float confidence_threshold,
                      int top_k,
                      int keep_top_k,
                      CDataBlob<float> * outputData);

// Run the full detector on a BGR image.  NOTE(review): 'with' is a typo
// for 'width' in the parameter name (declaration only; harmless).
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int with, int height, int step);
GB_binop__land_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every function below is a thin wrapper that defines the
// GB_* macros for the LAND_FP64 operator and then #includes a shared
// template; all real logic lives in the template files.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__land_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__land_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__land_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__land_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__land_fp64)
// A*D function (colscale):         GB (_AxD__land_fp64)
// D*A function (rowscale):         GB (_DxB__land_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__land_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__land_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__land_fp64)
// C=scalar+B                       GB (_bind1st__land_fp64)
// C=scalar+B'                      GB (_bind1st_tran__land_fp64)
// C=A+scalar                       GB (_bind2nd__land_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__land_fp64)

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = ((aij != 0) && (bij != 0))

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    double aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    double bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (logical AND on nonzero-ness, result stored as double)
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) && (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_FP64 || GxB_NO_LAND_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LAND does not qualify, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__land_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (LAND is commutative, so this branch is taken.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ((x != 0) && (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__land_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code below
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = ((aij != 0) && (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ceil_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 * Update: hhchen@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

/*
 * Element-wise ceil on an FP32 tensor: output[i] = ceilf(input[i]).
 *
 * input_tensor:  source tensor (FP32 data)
 * output_tensor: destination tensor, same element count as the input
 * num_thread:    OpenMP thread count for the 4-D path
 *
 * Returns 0 on success, -1 for an unsupported tensor rank.
 */
int ref_ceil_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* rank 1..3: process as a flat array */
    if (input_tensor->dim_num < 4)
    {
        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            /* BUGFIX: the original assigned ceilf(out_data[i]) to
             * input_data[i], reading uninitialized output memory and
             * clobbering the input tensor.  Operands are now the right
             * way around. */
            out_data[i] = ceilf(input_data[i]);
        }

        return 0;
    }
    /* rank 4: NCHW layout, parallel over channels.
     * NOTE(review): the batch dimension dims[0] is ignored, so this
     * assumes batch == 1 -- matches the original behavior; confirm
     * against callers. */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        /* BUGFIX: h was read from output_tensor->dims[2] while w and
         * channels came from the input; use the input dims consistently. */
        int h = input_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int c_step = h * w; /* elements per channel plane */

        float* input_data = input_tensor->data;
        float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;
            for (int i = 0; i < c_step; i++)
            {
                dst[i] = ceilf(src[i]);
            }
        }

        return 0;
    }

    /* unsupported rank */
    return -1;
}

/*
 * Element-wise ceil on a UINT8 (asymmetric quantized) tensor:
 * dequantize -> ceil -> requantize with saturation to [0, 255].
 *
 * Returns 0 on success, -1 on allocation failure or unsupported rank.
 */
int ref_ceil_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    uint8_t* input_uint8 = input_tensor->data;
    uint8_t* output_uint8 = output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int32_t input_zero = input_tensor->zero_point;
    int32_t output_zero = output_tensor->zero_point;
    int input_size = input_tensor->elem_num;
    int output_size = output_tensor->elem_num;

    /* scratch buffers for the dequantized values */
    float* input_data = (float*)sys_malloc(input_size * sizeof(float));
    float* out_data = (float*)sys_malloc(output_size * sizeof(float));

    /* BUGFIX: allocation results were never checked */
    if (input_data == NULL || out_data == NULL)
    {
        if (input_data)
            sys_free(input_data);
        if (out_data)
            sys_free(out_data);
        return -1;
    }

    /* dequant */
    for (int i = 0; i < input_size; i++)
    {
        input_data[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale;
    }

    /* rank 1..3: process as a flat array */
    if (input_tensor->dim_num < 4)
    {
        int total_size = input_tensor->elem_num;

        for (int i = 0; i < total_size; i++)
        {
            /* BUGFIX: original computed input_data[i] = ceil(out_data[i]),
             * reading the uninitialized scratch output and then
             * quantizing garbage below. */
            out_data[i] = ceilf(input_data[i]);
        }
    }
    /* rank 4: NCHW layout, parallel over channels (batch assumed 1,
     * as in the fp32 path) */
    else if (input_tensor->dim_num == 4)
    {
        int w = input_tensor->dims[3];
        /* BUGFIX: use input dims consistently (was output_tensor->dims[2]) */
        int h = input_tensor->dims[2];
        int channels = input_tensor->dims[1];
        int c_step = h * w;

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + c_step * q;
            float* dst = out_data + c_step * q;
            for (int i = 0; i < c_step; i++)
            {
                dst[i] = ceilf(src[i]);
            }
        }
    }
    else
    {
        /* BUGFIX: unsupported rank used to fall through and quantize an
         * uninitialized buffer while still returning success */
        sys_free(input_data);
        sys_free(out_data);
        return -1;
    }

    /* quant: round, shift by the output zero point, saturate to uint8 */
    for (int i = 0; i < output_size; i++)
    {
        int udata = round(out_data[i] / output_scale + output_zero);
        if (udata > 255)
            udata = 255;
        else if (udata < 0)
            udata = 0;
        output_uint8[i] = udata;
    }

    sys_free(input_data);
    sys_free(out_data);

    return 0;
}

/* No per-node state is needed for this reference op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch to the fp32 or uint8 kernel based on the input data type. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_ceil_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_ceil_uint8(input_tensor, output_tensor, exec_graph->num_thread);
    else
        TLOG_ERR("Input data type %d not to be supported.\n", input_tensor->data_type);

    return ret;
}

/* Reference implementation: always a candidate, never preferred. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_ceil_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_CEIL, &hcl_node_ops);
}

int unregister_ceil_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_CEIL, &hcl_node_ops);
}
t_factorize_cpu_serial.c
/* ========================================================================== */ /* === GPU/t_factorize_cpu_serial.c ========================================= */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis * The CHOLMOD/GPU Module is licensed under Version 2.0 of the GNU * General Public License. See gpl.txt for a text of the license. * CHOLMOD is also available under other licenses; contact authors for details. * http://www.suitesparse.com * -------------------------------------------------------------------------- */ /* * File: * t_factorize_cpu_serial * * Description: * Contains functions for factorization * of the CPU algorithm. (serial version) * */ /* includes */ #include <string.h> #include <time.h> #include "cholmod_template.h" #ifdef MKLROOT #include "mkl.h" #endif /* * Function: * gpu_factorize_cpu_serial * * Description: * Factorizes entire elimination tree on the CPU (serial version) * Returns 0 if matrix not positive-definite, 1 otherwise. 
* */ int TEMPLATE2 (CHOLMOD (gpu_factorize_cpu_serial)) ( cholmod_common *Common, cholmod_factor *L, cholmod_global_pointers *gb_p, cholmod_cpu_pointers *cpu_p, cholmod_tree_pointers *tree_p, cholmod_profile_pointers *prof_p, int deviceid ) { /* local variables */ int i, j, k, numThreads; Int px, pk, pf, p, q, d, s, n, ss, nsuper, ndrow, ndrow1, ndrow2, ndrow3, ndcol, nsrow, nsrow2, nscol, nscol2, nscol3, kd1, kd2, k1, k2, psx, psi, pdx, pdx1, pdi, pdi1, pdi2, pdend, psend, pfend, pend, dancestor, sparent, imap, start, end, super, dnext, info, repeat_supernode, Apacked, Fpacked, stype; Int *Ls, *Lpi, *Lpx, *Lpos, *Fp, *Fi, *Fnz, *Ap, *Ai, *Anz, *Super, *Map, *RelativeMap, *SuperMap, *Head, *Next, *Next_save, *Lpos_save; double *Lx, *Ax, *Az, *Fx, *Fz, *C, *beta, *tstart, *tend, *syrk_time, *gemm_time, *potrf_time, *trsm_time, *syrk_flops, *gemm_flops, *potrf_flops, *trsm_flops; double one[2] = {1.0, 0.0}, zero[2] = {0.0, 0.0}; double tstart1; /* * Set variables & pointers */ /* set host variables */ n = L->n; numThreads = Common->ompNumThreads; nsuper = L->nsuper; repeat_supernode = FALSE ; Apacked = cpu_p->Apacked; Fpacked = cpu_p->Fpacked; stype = cpu_p->stype; beta = cpu_p->beta; /* set host pointers */ Ls = cpu_p->Ls; Lpi = cpu_p->Lpi; Lpx = L->px; Lpos = cpu_p->Lpos; Fp = cpu_p->Fp; Fi = cpu_p->Fi; Fnz = cpu_p->Fnz; Ap = cpu_p->Ap; Ai = cpu_p->Ai; Anz = cpu_p->Anz; Super = cpu_p->Super; Map = cpu_p->Map; RelativeMap = cpu_p->RelativeMap; SuperMap = cpu_p->SuperMap; Head = cpu_p->Head; Next = cpu_p->Next; Next_save = cpu_p->Next_save; Lpos_save = cpu_p->Lpos_save; Lx = cpu_p->Lx; Ax = cpu_p->Ax; Az = cpu_p->Az; Fx = cpu_p->Fx; Fz = cpu_p->Fz; C = cpu_p->C; /* set timer pointers */ tstart = prof_p->f_start[deviceid]; tend = prof_p->f_end[deviceid]; syrk_time = prof_p->syrk_time[deviceid]; gemm_time = prof_p->gemm_time[deviceid]; potrf_time = prof_p->potrf_time[deviceid]; trsm_time = prof_p->trsm_time[deviceid]; syrk_flops = prof_p->syrk_flop[deviceid]; 
gemm_flops = prof_p->gemm_flop[deviceid]; potrf_flops = prof_p->potrf_flop[deviceid]; trsm_flops = prof_p->trsm_flop[deviceid]; #ifdef MKLROOT /* set mkl threads */ mkl_set_num_threads(numThreads); #else openblas_set_num_threads(numThreads); #endif /* clear global flops */ CLEAR1(syrk_flops,0); CLEAR1(gemm_flops,0); CLEAR1(potrf_flops,0); CLEAR1(trsm_flops,0); TIMER_START(tstart,0); /* loop over supernodes */ for(super = 0; super < nsuper; super++) { /* get supernode dimensiosn */ s = super; k1 = Super [s] ; /* s contains columns k1 to k2-1 of L */ k2 = Super [s+1] ; nscol = k2 - k1 ; /* # of columns in all of s */ psi = Lpi [s] ; /* pointer to first row of s in Ls */ psx = Lpx [s] ; /* pointer to first row of s in Lx */ psend = Lpi [s+1] ; /* pointer just past last row of s in Ls */ nsrow = psend - psi ; /* # of rows in all of s */ pend = psx + nsrow * nscol ; /* s is nsrow-by-nscol */ pk = psx ; SUM(potrf_flops,0,(double)(nscol*nscol*nscol/3.0)); SUM(trsm_flops,0,(double)((nsrow-nscol)*nscol*nscol)); TIMER_START(tstart,3); /* construct the scattered Map for supernode s */ #pragma omp parallel for num_threads(numThreads) if ( nsrow > 128 ) for (k = 0 ; k < nsrow ; k++) Map [Ls [psi + k]] = k ; /* copy matrix into supernode s (lower triangular part only) */ #pragma omp parallel for private ( p, pend, pfend, pf, i, j, imap, q ) num_threads(numThreads) if ( k2-k1 > 64 ) for (k = k1 ; k < k2 ; k++) { /* copy the kth column of A into the supernode */ if (stype != 0) { p = Ap [k] ; pend = (Apacked) ? (Ap [k+1]) : (p + Anz [k]) ; for ( ; p < pend ; p++) { i = Ai [p] ; if (i >= k) { imap = Map [i] ; /* row i of L is located in row Map [i] of s */ if (imap >= 0 && imap < nsrow) { L_ASSIGN (Lx,(imap+(psx+(k-k1)*nsrow)), Ax,Az,p) /* Lx [Map [i] + pk] = Ax [p] ; */; } } } } /* copy the kth column of A*F into the supernode */ else { double fjk[2]; pf = Fp [k] ; pfend = (Fpacked) ? 
(Fp [k+1]) : (p + Fnz [k]) ; for ( ; pf < pfend ; pf++) { j = Fi [pf] ; L_ASSIGN (fjk,0, Fx,Fz,pf) ; /* fjk = Fx [pf] ; */ p = Ap [j] ; pend = (Apacked) ? (Ap [j+1]) : (p + Anz [j]) ; for ( ; p < pend ; p++) { i = Ai [p] ; if (i >= k) { imap = Map [i] ; if (imap >= 0 && imap < nsrow) { L_MULTADD (Lx,(imap+(psx+(k-k1)*nsrow)),Ax,Az,p, fjk) ; /* Lx [Map [i] + pk] += Ax [p] * fjk ; */ } } } } } } /* add beta (only real part) to the diagonal of the supernode, if nonzero */ if (beta [0] != 0.0) { pk = psx ; for (k = k1 ; k < k2 ; k++) { L_ASSEMBLE (Lx,pk, beta) ; /* Lx [pk] += beta [0] ; */ pk += nsrow + 1 ; /* advance to the next diagonal entry */ } } TIMER_END(tstart,tend,3); /* save/restore the list of supernodes */ if (!repeat_supernode) { for (d = Head [s] ; d != EMPTY ; d = Next [d]) { Lpos_save [d] = Lpos [d] ; Next_save [d] = Next [d] ; } } else { for (d = Head [s] ; d != EMPTY ; d = Next [d]) { Lpos [d] = Lpos_save [d] ; Next [d] = Next_save [d] ; } } dnext = Head[s]; /* loop over descendant d of supernode s */ while( (dnext != EMPTY) || (0) ) { d = dnext; /* get the size of supernode d */ kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */ kd2 = Super [d+1] ; ndcol = kd2 - kd1 ; /* # of columns in all of d */ pdi = Lpi [d] ; /* pointer to first row of d in Ls */ pdx = Lpx [d] ; /* pointer to first row of d in Lx */ pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */ ndrow = pdend - pdi ; /* # rows in all of d */ /* find the range of rows of d that affect rows k1 to k2-1 of s */ p = Lpos [d] ; /* offset of 1st row of d affecting s */ pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */ pdx1 = pdx + p ; /* ptr to 1st row of d affecting s in Lx */ for (pdi2 = pdi1 ; pdi2 < pdend && Ls [pdi2] < k2 ; (pdi2)++) ; ndrow1 = pdi2 - pdi1 ; /* # rows in first part of d */ ndrow2 = pdend - pdi1 ; /* # rows in remaining d */ /* construct the update matrix C for this supernode d */ ndrow3 = ndrow2 - ndrow1 ; /* number of rows of C2 */ 
SUM(syrk_flops,0,(double)(ndrow1*ndrow1*ndcol)); SUM(gemm_flops,0,(double)(2*(ndrow2-ndrow1)*ndrow1*ndcol)); /* * Supernode Assembly * * Assemble the supernode with the following steps: * * 1. perform dsyrk * 2. perform dgemm * 3. perform addUpdate * */ TIMER_START1(tstart1); #ifdef REAL /* dsyrk */ BLAS_dsyrk ("L", "N", ndrow1, ndcol, /* N, K: L1 is ndrow1-by-ndcol*/ one, /* ALPHA: 1 */ Lx + L_ENTRY*pdx1, ndrow, /* A, LDA: L1, ndrow */ zero, /* BETA: 0 */ C, ndrow2) ; /* C, LDC: C1 */ #else BLAS_zherk ("L", "N", ndrow1, ndcol, /* N, K: L1 is ndrow1-by-ndcol*/ one, /* ALPHA: 1 */ Lx + L_ENTRY*pdx1, ndrow, /* A, LDA: L1, ndrow */ zero, /* BETA: 0 */ C, ndrow2) ; /* C, LDC: C1 */ #endif TIMER_END1(tstart1,syrk_time,0); /* dgemm */ TIMER_START1(tstart1); if (ndrow3 > 0) { #ifdef REAL BLAS_dgemm ("N", "C", ndrow3, ndrow1, ndcol, /* M, N, K */ one, /* ALPHA: 1 */ Lx + L_ENTRY*(pdx1 + ndrow1), /* A, LDA: L2 */ ndrow, /* ndrow */ Lx + L_ENTRY*pdx1, /* B, LDB: L1 */ ndrow, /* ndrow */ zero, /* BETA: 0 */ C + L_ENTRY*ndrow1, /* C, LDC: C2 */ ndrow2) ; #else BLAS_zgemm ("N", "C", ndrow3, ndrow1, ndcol, /* M, N, K */ one, /* ALPHA: 1 */ Lx + L_ENTRY*(pdx1 + ndrow1), /* A, LDA: L2 */ ndrow, /* ndrow */ Lx + L_ENTRY*pdx1, /* B, LDB: L1, ndrow */ ndrow, zero, /* BETA: 0 */ C + L_ENTRY*ndrow1, /* C, LDC: C2 */ ndrow2) ; #endif } TIMER_END1(tstart1,gemm_time,0); TIMER_START(tstart,4); /* construct relative map to assemble d into s */ #pragma omp parallel for num_threads(numThreads) if ( ndrow2 > 64 ) for (i = 0 ; i < ndrow2 ; i++) RelativeMap [i] = Map [Ls [pdi1 + i]] ; /* assemble C into supernode s using the relative map */ #pragma omp parallel for private ( j, i, px, q ) num_threads(numThreads) if (ndrow1 > 64 ) for (j = 0 ; j < ndrow1 ; j++) /* cols k1:k2-1 */ { px = psx + RelativeMap [j] * nsrow ; for (i = j ; i < ndrow2 ; i++) /* rows k1:n-1 */ { q = px + RelativeMap [i] ; L_ASSEMBLESUB (Lx,q, C, i+ndrow2*j) ; /* Lx [px + RelativeMap [i]] -= C [i + pj] ; */ } } 
TIMER_END(tstart,tend,4); /* prepare this supernode d for its next ancestor */ dnext = Next [d] ; if (!repeat_supernode) { Lpos [d] = pdi2 - pdi ; if (Lpos [d] < ndrow) { dancestor = SuperMap [Ls [pdi2]] ; /* place d in the link list of its next ancestor */ //#pragma omp critical (head_next) { Next [d] = Head [dancestor] ; Head [dancestor] = d ; } } } } /* end of descendant supernode loop */ /* * Cholesky Factorization * * Factorize diagonal block of spuernode s in LL' in the following steps: * 1. perform dpotrf * */ TIMER_START1(tstart1); nscol2 = (repeat_supernode) ? (nscol3) : (nscol) ; #ifdef REAL LAPACK_dpotrf ("L", nscol2, /* N: nscol2 */ Lx + L_ENTRY*psx, nsrow, /* A, LDA: S1, nsrow */ info) ; /* INFO */ #else LAPACK_zpotrf ("L", nscol2, /* N: nscol2 */ Lx + L_ENTRY*psx, nsrow, /* A, LDA: S1, nsrow */ info) ; /* INFO */ #endif TIMER_END1(tstart1,potrf_time,0); /* check if the matrix is not positive definite */ if (repeat_supernode) { /* the leading part has been refactorized; it must have succeeded */ info = 0 ; /* zero out the rest of this supernode */ p = psx + nsrow * nscol3 ; pend = psx + nsrow * nscol ; /* s is nsrow-by-nscol */ for ( ; p < pend ; p++) { /* Lx [p] = 0 ; */ L_CLEAR (Lx,p) ; } } /* info is set to one in LAPACK_*potrf if blas_ok is FALSE. It is * set to zero in dpotrf/zpotrf if the factorization was successful. */ if (CHECK_BLAS_INT && !Common->blas_ok) { ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ; } /* check if the matrix is not positive definite */ if (info != 0) { /* Matrix is not positive definite. dpotrf/zpotrf do NOT report an * error if the diagonal of L has NaN's, only if it has a zero. */ if (Common->status == CHOLMOD_OK) { ERROR (CHOLMOD_NOT_POSDEF, "matrix not positive definite") ; } /* L->minor is the column of L that contains a zero or negative * diagonal term. 
*/ L->minor = k1 + info - 1 ; /* clear the link lists of all subsequent supernodes */ for (ss = s+1 ; ss < L->nsuper ; ss++) { Head [ss] = EMPTY ; } /* zero this supernode, and all remaining supernodes */ pend = L->xsize ; for (p = psx ; p < pend ; p++) { /* Lx [p] = 0. ; */ L_CLEAR (Lx,p) ; } /* If L is indefinite, it still contains useful information. * Supernodes 0 to s-1 are valid, similar to MATLAB [R,p]=chol(A), * where the 1-based p is identical to the 0-based L->minor. Since * L->minor is in the current supernode s, it and any columns to the * left of it in supernode s are also all zero. This differs from * [R,p]=chol(A), which contains nonzero rows 1 to p-1. Fix this * by setting repeat_supernode to TRUE, and repeating supernode s. * * If Common->quick_return_if_not_posdef is true, then the entire * supernode s is not factorized; it is left as all zero. */ if (info == 1 || Common->quick_return_if_not_posdef) { /* If the first column of supernode s contains a zero or * negative diagonal entry, then it is already properly set to * zero. Also, info will be 1 if integer overflow occured in * the BLAS. */ Head [s] = EMPTY ; return Common->status;/*(Common->status >= CHOLMOD_OK)*/ } else { /* Repeat supernode s, but only factorize it up to but not * including the column containing the problematic diagonal * entry. */ repeat_supernode = TRUE ; s-- ; nscol3 = info - 1 ; continue ; } } /* * Triangular Solve * * Compute the subdiagonal block in the following steps: * 1. perform dtrsm * 2. copy result back into factor Lx * 3. prepare next supernode * */ nsrow2 = nsrow - nscol2 ; if (nsrow2 > 0) { /* The current supernode is columns k1 to k2-1 of L. Let L1 be the * diagonal block (factorized by dpotrf/zpotrf above; rows/cols * k1:k2-1), and L2 be rows k2:n-1 and columns k1:k2-1 of L. The * triangular system to solve is L2*L1' = S2, where S2 is * overwritten with L2. More precisely, L2 = S2 / L1' in MATLAB * notation. 
*/ /* dtrsm */ TIMER_START1(tstart1); #ifdef REAL BLAS_dtrsm ("R", "L", "C", "N", nsrow2, nscol2, /* M, N */ one, /* ALPHA: 1 */ Lx + L_ENTRY*psx, nsrow, /* A, LDA: L1, nsrow */ Lx + L_ENTRY*(psx + nscol2), /* B, LDB, L2, nsrow */ nsrow) ; #else BLAS_ztrsm ("R", "L", "C", "N", nsrow2, nscol2, /* M, N */ one, /* ALPHA: 1 */ Lx + L_ENTRY*psx, nsrow, /* A, LDA: L1, nsrow */ Lx + L_ENTRY*(psx + nscol2), /* B, LDB, L2, nsrow */ nsrow) ; #endif TIMER_END1(tstart1,trsm_time,0); if (CHECK_BLAS_INT && !Common->blas_ok) { ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ; } /* prepare for next supernode */ if (!repeat_supernode) { /* Lpos [s] is offset of first row of s affecting its parent */ Lpos [s] = nscol ; sparent = SuperMap [Ls [psi + nscol]] ; /* place s in link list of its parent */ //#pragma omp critical (head_next) { Next [s] = Head [sparent] ; Head [sparent] = s ; } //Head[s] = EMPTY ; } } Head [s] = EMPTY ; /* link list for supernode s no longer needed */ if (repeat_supernode) { /* matrix is not positive definite; finished clean-up for supernode * containing negative diagonal */ return Common->status;/*(Common->status >= CHOLMOD_OK)*/ } } /* end loop over supenodes */ TIMER_END(tstart,tend,0); /* print overall benchmarks */ PRINTF("\n\nElimination tree benchmarks:\n"); PRINTF("\n- time -\n"); PRINTFV("total: %f\n",tend[0]); PRINTFV("initLx: %f\n",tend[3]); PRINTFV("dsyrk: %f\n",syrk_time[0]); PRINTFV("dgemm: %f\n",gemm_time[0]); PRINTFV("assembly: %f\n",tend[4]); PRINTFV("dpotrf: %f\n",potrf_time[0]); PRINTFV("dtrsm: %f\n",trsm_time[0]); PRINTF("\n- flops -\n"); PRINTFV("dsyrk: %f\n",1.0e-9*syrk_flops[0]/syrk_time[0]); PRINTFV("dgemm: %f\n",1.0e-9*gemm_flops[0]/gemm_time[0]); PRINTFV("dpotrf: %f\n",1.0e-9*potrf_flops[0]/potrf_time[0]); PRINTFV("dtrsm: %f\n",1.0e-9*trsm_flops[0]/trsm_time[0]); PRINTF("\n"); /* return ok */ return Common->status;/*(Common->status >= CHOLMOD_OK)*/ } /* #undef REAL #undef COMPLEX #undef ZOMPLEX */ /* #undef REAL #undef 
COMPLEX #undef ZOMPLEX */
GB_unop__tan_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__tan_fc64_fc64) // op(A') function: GB (_unop_tran__tan_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = ctan (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ctan (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = ctan (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TAN || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__tan_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ctan (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ctan (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__tan_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__rdiv_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_03__rdiv_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int8) // A*D function (colscale): GB (_AxD__rdiv_int8) // D*A function (rowscale): GB (_DxB__rdiv_int8) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int8) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int8) // C=scalar+B GB (_bind1st__rdiv_int8) // C=scalar+B' GB (_bind1st_tran__rdiv_int8) // C=A+scalar GB (_bind2nd__rdiv_int8) // C=A'+scalar GB (_bind2nd_tran__rdiv_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IDIV_SIGNED (y, x, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rdiv_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, 
const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
remote.h
#pragma once #include "adabs/adabs.h" #include "adabs/pgas_addr.h" #include "adabs/memcpy.h" #include "adabs/tools/ptr_divider.h" namespace adabs { template <typename T> class local; /** * Maybe BYTEWISE COPIED after the copy constructor! * Make sure the copy will work fine even on different nodes */ template <typename T> class remote { public: typedef T value_type; private: int _x; int _y; int _batch_size_x; int _batch_size_y; pgas_addr<T> _data; mutable pgas_addr<T>** _datas; public: template <typename cpy_distT> remote(const cpy_distT& cpy_dist) : _x(cpy_dist.get_size_x()), _y(cpy_dist.get_size_y()), _batch_size_x(cpy_dist.get_batch_size_x()), _batch_size_y(cpy_dist.get_batch_size_y()), _data(cpy_dist.get_data_addr()), _datas(0) { assert (_x%_batch_size_x == 0); assert (_y%_batch_size_y == 0); } remote(const remote<T> &cpy) : _x(cpy.get_size_x()), _y(cpy.get_size_y()), _batch_size_x(cpy.get_batch_size_x()), _batch_size_y(cpy.get_batch_size_y()), _data(cpy._data), _datas(0) { } ~remote() { if (_datas != 0) { for (int i=0; i<(_x/_batch_size_x) * (_y/_batch_size_y); ++i) delete _datas[i]; } delete[] _datas; } private: public: const pgas_addr<T>& get_data_addr() const { return _data; } T* get_data_unitialized(const int x, const int y=1) { assert (x%_batch_size_x == 0); assert (y%_batch_size_y == 0); const int offset = get_offset(x,y); #pragma omp critical (remote_cache) { if (_datas == 0) init(); if (_datas[offset] == 0) _datas[offset] = new pgas_addr<T>((*_datas[0]) + offset); } return _datas[offset]->get_data_unitialized(); } const T* get_data(const int x, const int y=1) const { assert (x%_batch_size_x == 0); assert (y%_batch_size_y == 0); const int offset = get_offset(x,y); #pragma omp critical (remote_cache) { if (_datas == 0) init(); if (_datas[offset] == 0) _datas[offset] = new pgas_addr<T>((*_datas[0]) + offset); } return _datas[offset]->get_data(); } void set_data(const int x, T const * restrict const ptr) { assert (x%_batch_size_x == 0); assert (_datas != 
0); const int offset = get_offset(x,1); _datas[offset]->set_data(ptr); } void set_data(const int x, const int y, T* ptr) { assert (x%_batch_size_x == 0); assert (y%_batch_size_y == 0); assert (_datas != 0); const int offset = get_offset(x,y); _datas[offset]->set_data(ptr); } int get_size_x() const { return _x; } int get_size_y() const { return _y; } int get_batch_size_x() const { return _batch_size_x; } int get_batch_size_y() const { return _batch_size_y; } void wait_for_complete() const { using namespace adabs::tools; pgas_addr<T> temp = get_data_addr() + 1; const int stride = (char*)temp.get_orig_flag() - (char*)get_data_addr().get_orig_flag(); // check if remote data is available volatile int done = 0; GASNET_CALL(gasnet_AMRequestShort6(get_data_addr().get_node(), adabs::impl::PGAS_ADDR_CHECK_GET_ALL, get_low(get_data_addr().get_orig_flag()), get_high(get_data_addr().get_orig_flag()), stride, local_size()/_batch_size_x/_batch_size_y, get_low(&done), get_high(&done) ) ) while (done != 1) {} } void check_empty() const { using namespace adabs::tools; pgas_addr<T> temp = get_data() + 1; const int stride = (char*)temp.get_orig_flag() - (char*)get_data().get_orig_flag(); // check if remote data is still empty volatile int done = 0; GASNET_CALL(gasnet_AMRequestShort6(_data.get_node(), adabs::impl::PGAS_ADDR_GET_UNINIT, get_low(_data.get_orig_flag()), get_high(_data.get_orig_flag()), stride, local_size()/_batch_size_x/_batch_size_y, get_low(&done), get_high(&done) ) ) while (done != 1) {} } remote<T>& operator=(const local<T> &rhs) { rhs.wait_for_complete(); // TODO add options to disable checks check_empty(); adabs::memcpy(_data, rhs.get_data(), local_size()/_batch_size_x/_batch_size_y); return *this; } pgas_addr<T>& get_data() const { return const_cast<pgas_addr<T>&>(_data); } private: void init() const { assert(_datas == 0); _datas = new pgas_addr<T>*[(_x/_batch_size_x) * (_y/_batch_size_y)]; _datas[0] = new pgas_addr<T>(_data); for (int i=1; i<(_x/_batch_size_x) * 
(_y/_batch_size_y); ++i) _datas[i] = 0; assert (_datas != 0); } int local_size() const { return _x*_y; } bool is_local(const int x, const int y) const { return get_node(x,y) == adabs::me; } int get_node(const int x, const int y) const { return _data.get_node(); } int get_local_x(const int x) const { return x; } int get_local_y(const int y) const { return y; } int get_offset(const int x, const int y) const { if (_y==1) return x / _batch_size_x; else return (x/_batch_size_x + (_x/_batch_size_x)*(y/_batch_size_y)); } }; }
spectralnorm-5.c
/* The Computer Language Benchmarks Game
 * http://benchmarksgame.alioth.debian.org/
 *
 * contributed by Ledrug
 * algorithm is a straight copy from Steve Decker et al's Fortran code
 * with GCC SSE2 intrinsics
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <malloc.h>
#include <emmintrin.h>

/* Entry (i,j) of the implicit infinite matrix A.
 * "static" added: a plain C99 "inline" function with no external definition
 * is a link error under -std=c99; static inline is always safe. */
static inline double A(int i, int j) {
   return ((i+j) * (i+j+1) / 2 + i + 1);
}

/* Horizontal sum of both lanes of a __m128d, using intrinsics instead of
 * GCC's vector-subscript extension (sum[0] + sum[1]) for portability. */
static inline double hsum_pd(__m128d v) {
   return _mm_cvtsd_f64(_mm_add_sd(v, _mm_unpackhi_pd(v, v)));
}

/* dot product of v and u, both of length n */
double dot(double * v, double * u, int n) {
   int i;
   double sum = 0;
   for (i = 0; i < n; i++)
      sum += v[i] * u[i];
   return sum;
}

/* out = A * v (n must be even; two columns are processed per SSE2 step) */
void mult_Av(double * v, double * out, const int n) {
   int i;
   #pragma omp parallel for
   for (i = 0; i < n; i++) {
      __m128d sum = _mm_setzero_pd();
      int j;
      for (j = 0; j < n; j += 2) {
         __m128d b = _mm_set_pd(v[j], v[j+1]);
         __m128d a = _mm_set_pd(A(i,j), A(i,j+1));
         sum = _mm_add_pd(sum, _mm_div_pd(b, a));
      }
      out[i] = hsum_pd(sum);
   }
}

/* out = A' * v (transposed product; same layout as mult_Av) */
void mult_Atv(double * v, double * out, const int n) {
   int i;
   #pragma omp parallel for
   for (i = 0; i < n; i++) {
      __m128d sum = _mm_setzero_pd();
      int j;
      for (j = 0; j < n; j += 2) {
         __m128d b = _mm_set_pd(v[j], v[j+1]);
         __m128d a = _mm_set_pd(A(j,i), A(j+1,i));
         sum = _mm_add_pd(sum, _mm_div_pd(b, a));
      }
      out[i] = hsum_pd(sum);
   }
}

/* scratch vector shared by mult_AtAv (sized n, allocated in main) */
double *tmp;

/* out = A' * (A * v) */
void mult_AtAv(double *v, double *out, const int n) {
   mult_Av(v, tmp, n);
   mult_Atv(tmp, out, n);
}

int main(int argc, char**argv) {
   /* BUG FIX: argv[1] was read unconditionally, which is undefined
    * behavior when the program is run without an argument.  Fall back to
    * the benchmark default of 2000 instead. */
   int n = (argc > 1) ? atoi(argv[1]) : 0;
   if (n <= 0) n = 2000;
   if (n & 1) n++;    // make it multiple of two

   double *u, *v;
   u   = memalign(16, n * sizeof(double));
   v   = memalign(16, n * sizeof(double));
   tmp = memalign(16, n * sizeof(double));
   if (!u || !v || !tmp) {          /* allocation results were unchecked */
      fprintf(stderr, "out of memory\n");
      return 1;
   }

   int i;
   for (i = 0; i < n; i++) u[i] = 1;
   for (i = 0; i < 10; i++) {
      mult_AtAv(u, v, n);
      mult_AtAv(v, u, n);
   }

   printf("%.9f\n", sqrt(dot(u,v, n) / dot(v,v,n)));

   /* memalign'd blocks are released with free (glibc) */
   free(u);
   free(v);
   free(tmp);
   return 0;
}
gradientCheck.h
/* gradientCheck.h — finite-difference gradient checking for an NCE-trained
 * neural network language model (Eigen + Boost + OpenMP).
 *
 * gradientChecking(): runs fprop/bprop to collect analytic gradients, then
 * perturbs individual parameters (normalization constants c_h, output
 * embeddings D_prime.W/b, hidden bias, context matrices U, input embeddings W)
 * by delta_perturb and compares (perturbed_loss - loss)/delta against the
 * analytic gradient, printing ratios/differences and pausing on getchar().
 * initZero(): clears all fProp/bProp matrices in the graph.
 * computeLossFunction(): recomputes the NCE minibatch loss for given
 * parameters and fixed noise samples.
 * fPropGradCheck(): forward pass (word -> context -> tanh hidden -> output).
 *
 * NOTE(review): this file appears to have been collapsed onto very long
 * physical lines by a reformatting accident: several `//` comments now
 * swallow the code that follows them on the same physical line, at least one
 * string literal and one `#pragma` backslash-continuation are split across
 * line breaks, and L664 begins with a bare token ("parameters") severed from
 * its comment. The file almost certainly no longer compiles as laid out —
 * restore the original line structure from version history before editing.
 * Code below is intentionally left byte-identical; only comments were added
 * at junctions that fall between complete statements/comments. */
#include <iostream> #include <list> #include <ctime> #include <cstdio> #include <tclap/CmdLine.h> #include <boost/algorithm/string/join.hpp> #include "param.h" #include "neuralClasses.h" //#include "graphClasses.h" #include "util.h" //#include "RBMDahlFunctions.h" #include "log_add.h" #include <cmath> #include <stdlib.h> typedef Node <Word_embeddings> word_node; typedef Node <Context_matrix> context_node; typedef Node <Hidden_layer> hidden_node; //#include "lossFunctions.h" //#include<tr1/random> #include <time.h> //#include <chrono> //#include <random> #include <Eigen/Dense> #include <Eigen/Core> #include <stdio.h> #include <iomanip> #include <boost/random/uniform_real_distribution.hpp> #include <boost/random/uniform_int_distribution.hpp> #include <boost/random/mersenne_twister.hpp> #include "maybe_omp.h" #include <math.h> #include <boost/unordered_map.hpp> #include <boost/functional.hpp> #include <stdlib.h> typedef boost::unordered_map<vector<int>, double> vector_map; typedef boost::unordered_map<int,vector_map > thread_vector_map; typedef Eigen::Matrix<double,Dynamic,Dynamic> RealMatrix; using namespace std; using namespace Eigen; using namespace boost::random; void inline fPropGradCheck(param & ,int ,int ,vector<word_node > &,vector<context_node > &,hidden_node &,context_node &,vector<Matrix<int,Dynamic,1> >&); double computeLossFunction(param & ,int ,int ,vector<word_node > &, vector<context_node > &,hidden_node &,context_node & , vector<Matrix<int,Dynamic,1> >&,vector_map & ,Matrix<double,Dynamic,Dynamic> & , Matrix<int,Dynamic,Dynamic> & ,vector<vector<double> > &,Output_word_embeddings & ); void initZero(param & ,vector<word_node > &,vector<context_node > &,hidden_node &, context_node & ); void gradientChecking(param & myParam,int minibatch_start_index,int current_minibatch_size,vector<word_node > &word_nodes, vector<context_node > &context_nodes,hidden_node &hidden_layer_node,context_node & hidden_layer_to_output_node, vector<Matrix<int,Dynamic,1> 
>&shuffled_training_data,vector_map &c_h,vector<uniform_real_distribution<> >& unif_real_vector, vector<mt19937> & eng_real_vector,vector<uniform_int_distribution<> > & unif_int_vector,vector<mt19937> & eng_int_vector, vector<vector<double> > &unigram_probs_vector,vector<vector<double> > & q_vector,vector<vector<int> >&J_vector,Output_word_embeddings & D_prime) { double delta_perturb = 0.000005; std::setprecision(20); //creating the gradient matrices RealMatrix gradient_input_W; RealMatrix gradient_output_W; Matrix<double,Dynamic,1>gradient_output_b; Matrix<double,Dynamic,1>gradient_h_bias; RealMatrix gradient_hidden_to_output_matrix; vector<RealMatrix> gradients_context_matrix; int ngram_size = myParam.ngram_size; int n_hidden = myParam.n_hidden; int n_vocab = myParam.n_vocab; int embedding_dimension = myParam.embedding_dimension; int minibatch_size = myParam.minibatch_size; int num_noise_samples = myParam.num_noise_samples; double normalization_init = myParam.normalization_init; gradient_input_W.setZero(myParam.n_vocab,myParam.embedding_dimension); gradient_output_W.setZero(myParam.n_vocab,myParam.embedding_dimension); gradient_output_b.setZero(myParam.n_vocab); gradient_h_bias.setZero(myParam.n_hidden); gradient_hidden_to_output_matrix.setZero(myParam.embedding_dimension,myParam.n_hidden); for (int word = 0;word<myParam.ngram_size-1;word++) { RealMatrix context_matrix_gradient; context_matrix_gradient.setZero(myParam.n_hidden,myParam.embedding_dimension); gradients_context_matrix.push_back(context_matrix_gradient); } ///////////////////FORWARD PROPAGATION////////////////////////// //////////////////////////////////////////////////////////////// fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data); /////////////////COMPUTING THE NCE LOSS FUNCTION////////////////////////////////// 
// NCE pass: per-thread maps in c_h_gradient_vector accumulate the gradient of
// each context's normalization constant; they are merged into c_h_gradient
// after the parallel region. NOTE(review): std::setprecision(20) above is a
// no-op — its return value is discarded and never applied to a stream.
///////////////////////////////////////////////////////////////////////////////// //computing the loss function //now computing the loss function //Matrix<double,Dynamic,Dynamic> output_gradient; Matrix<double,Dynamic,Dynamic> minibatch_predicted_embeddings(myParam.embedding_dimension,current_minibatch_size); Matrix<double,Dynamic,1> minibatch_positive_weights(current_minibatch_size); Matrix<double,Dynamic,Dynamic> minibatch_negative_weights(current_minibatch_size,myParam.num_noise_samples); Matrix<int,Dynamic,Dynamic> minibatch_negative_samples(current_minibatch_size,myParam.num_noise_samples); //int thread_id = 0; //for now, this is a proxy. When I'm multithreading this code, the thread ID will change //creating the unordered map for each thread //thread_vector_map c_h_gradient_vector; vector<vector_map> c_h_gradient_vector; vector_map c_h_gradient; for (int thread_id =0 ;thread_id<myParam.n_threads;thread_id++) { vector_map temp; c_h_gradient_vector.push_back(temp); } clock_t t; t = clock(); //c_h_gradient_vector += minibatch_positive_weights(train_id) //parallelizing the creation with multithreading Eigen::initParallel(); Eigen::setNbThreads(1); cout<<"staring the fprop"<<endl; #pragma omp parallel firstprivate(current_minibatch_size,minibatch_start_index,ngram_size,n_vocab,embedding_dimension, \ num_noise_samples,normalization_init) { #pragma omp for //schedule(dynamic) for (int train_id = 0;train_id < minibatch_size;train_id++) { int thread_id = omp_get_thread_num(); int output_word = shuffled_training_data[ngram_size-1](minibatch_start_index+train_id); //cout<<"output word is "<<output_word<<endl; Matrix<double,Dynamic,1> predicted_embedding = hidden_layer_to_output_node.fProp_matrix.col(train_id); vector<int> context;//(ngram_size-1); //creating the context for (int word = 0;word<ngram_size-1;word++) { //cout<<"train id is "<<train_id<<endl; //cout<<"minibatch start index is "<<minibatch_start_index<<endl; 
// NOTE(review): the parallel loop above iterates over minibatch_size, not
// current_minibatch_size — verify behavior for a short final minibatch
// (the minibatch_* matrices are sized with current_minibatch_size).
context.push_back(shuffled_training_data[word](minibatch_start_index+train_id)); cout<<"word "<<word<<" in context is "<<shuffled_training_data[word](minibatch_start_index+train_id)<<endl; //context.push_back((*thread_data_col_locations[thread_id][word])(minibatch_start_index+train_id)); } double log_inv_normalization_const_h = 0.; //getting a normalization constant and making it threasafe //this region does not need to be critical because its just a read log_inv_normalization_const_h = c_h[context]; double inv_normalization_const_h = exp(log_inv_normalization_const_h); //cout<<"The normalization constant is "<<inv_normalization_const_h<<endl; //setting the gradient for that context to 0; //double c_h_gradient_vector = 0.0; minibatch_predicted_embeddings.col(train_id) = predicted_embedding; double score = D_prime.W.row(output_word).dot(predicted_embedding) + D_prime.b(output_word); double unnorm_positive_prob = exp(score); minibatch_positive_weights(train_id) = num_noise_samples*unigram_probs_vector[thread_id][output_word]/ (unnorm_positive_prob*inv_normalization_const_h + num_noise_samples * unigram_probs_vector[thread_id][output_word]) ; if (c_h_gradient_vector[thread_id].find(context) == c_h_gradient_vector[thread_id].end()) { c_h_gradient_vector[thread_id][context] = minibatch_positive_weights(train_id); } else { //cout<<"we got a repeat!"<<endl; c_h_gradient_vector[thread_id][context] += minibatch_positive_weights(train_id); } ///COMPUTING NOISE SAMPLES/// for (int sample_id = 0;sample_id <num_noise_samples;sample_id++) { int mixture_component = unif_int_vector[thread_id](eng_int_vector[thread_id]); //cout<<"mixture component was "<<mixture_component<<endl; double p = unif_real_vector[thread_id](eng_real_vector[thread_id]); int sample ; //cout<<"computing sample"<<endl; //cout<<"remaining bernoulli item is "<<J_vector[thread_id][mixture_component]<<endl; if (q_vector[thread_id][mixture_component] >= p) { //cout<<"mixture accepted"<<endl; sample = 
mixture_component; } else { //cout<<"J accepted "<<endl; sample = J_vector[thread_id][mixture_component]; } //vector<int> context(ngram_size-1); //cout<<"the sample was "<<sample<<endl; assert (sample >= 0); minibatch_negative_samples(train_id,sample_id) = sample; double negative_score = D_prime.W.row(sample).dot(predicted_embedding) + D_prime.b(sample); double negative_unnorm_prob = exp(negative_score); minibatch_negative_weights(train_id,sample_id) = negative_unnorm_prob*inv_normalization_const_h/ (negative_unnorm_prob*inv_normalization_const_h + num_noise_samples * unigram_probs_vector[thread_id][sample]); c_h_gradient_vector[thread_id][context] -= minibatch_negative_weights(train_id,sample_id); } } } #pragma omp barrier /////////////////////////////////UPDATING GRADIENTS AND DOING BACKPROPAGATION///////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// //t = clock(); //updating the normalization constants for (int thread_id=0;thread_id<myParam.n_threads;thread_id++) { vector_map::iterator it; for (it = c_h_gradient_vector[thread_id].begin();it != c_h_gradient_vector[thread_id].end();it++) { if (c_h_gradient.find((*it).first) == c_h_gradient.end()) { c_h_gradient[(*it).first] = (*it).second; } else { c_h_gradient[(*it).first] += (*it).second; } } } //cout<<"the time taken to update normalization constants was "<<clock()-t<<endl; //t = clock(); //first comput the backprop gradient Matrix<double,Dynamic,Dynamic> context_bProp_matrix;//(myParam.embedding_dimension,current_minibatch_size); context_bProp_matrix.setZero(myParam.embedding_dimension,current_minibatch_size); D_prime.bProp(shuffled_training_data[myParam.ngram_size-1],minibatch_positive_weights, minibatch_negative_samples,minibatch_negative_weights, context_bProp_matrix,minibatch_start_index,current_minibatch_size,myParam.num_noise_samples); //cout<<"the time taken to do bprop on the output layer was "<<clock()-t<<endl; //now then update the 
parameters //t = clock(); D_prime.computeGradientCheck(minibatch_predicted_embeddings,shuffled_training_data[myParam.ngram_size-1],minibatch_positive_weights, minibatch_negative_samples,minibatch_negative_weights, minibatch_start_index,current_minibatch_size,myParam.num_noise_samples,gradient_output_W,gradient_output_b); //cout<<"the time taken to compute the gradient on the output layer was "<<clock()-t<<endl; //now doing backprop on hidden layer to output matrix hidden_layer_to_output_node.param->bProp(context_bProp_matrix,hidden_layer_to_output_node.bProp_matrix); hidden_layer_to_output_node.param->computeGradientCheckOmp(context_bProp_matrix,hidden_layer_node.fProp_matrix,gradient_hidden_to_output_matrix); //now doing backprop on the hidden node hidden_layer_node.param->bPropTanh(hidden_layer_to_output_node.bProp_matrix,hidden_layer_node.bProp_matrix,hidden_layer_node.fProp_matrix); hidden_layer_node.param->computeGradientCheckTanh(hidden_layer_to_output_node.bProp_matrix,hidden_layer_node.fProp_matrix,gradient_h_bias); //now doing backprop on the context matrices for (int word = 0;word<myParam.ngram_size-1;word++) { context_nodes[word].param->bProp(hidden_layer_node.bProp_matrix,context_nodes[word].bProp_matrix); //updating the context weights context_nodes[word].param->computeGradientCheckOmp(hidden_layer_node.bProp_matrix,word_nodes[word].fProp_matrix,gradients_context_matrix[word]); } //doing backprop on the word embeddings for (int word = 0;word < myParam.ngram_size-1;word++) { //cout<<"the backprop matrix from the context "<<word<<" before doing word updates is "<<context_nodes[word].bProp_matrix<<endl; //getchar(); word_nodes[word].param->computeGradientCheck(context_nodes[word].bProp_matrix,shuffled_training_data[word],minibatch_start_index, current_minibatch_size,gradient_input_W); } //compute the NCE LOSS FUNTION double current_nce_loss = 
computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, hidden_layer_to_output_node,shuffled_training_data,c_h,minibatch_predicted_embeddings,minibatch_negative_samples, unigram_probs_vector,D_prime); cout<<"the current nce loss is "<<setprecision(10)<<current_nce_loss<<endl; //for all the nodes in the graph, I have to set the bprop and fprop matrices to zero /* hidden_layer_node.bProp_matrix.setZero(); hidden_layer_node.fProp_matrix.setZero(); hidden_layer_to_output_node.fProp_matrix.setZero(); hidden_layer_to_output_node.bProp_matrix.setZero(); for (int word = 0;word<myParam.ngram_size-1;word++) { context_nodes[word].fProp_matrix.setZero(); context_nodes[word].bProp_matrix.setZero(); word_nodes[word].fProp_matrix.setZero(); //word_nodes[word].bProp_matrix.setZero(); } */ //initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node); //now that we have the gradients, we check our gradients using finite differences ////COMPUTING THE LOSS FUNCTION//////////////////////////// //randomly pick up some parameters whose gradient you want to inspect //first pick some random examples from the minibatch //srand (time(NULL)); //cout<<"the current minibatch size is "<<current_minibatch_size<<endl; //cout<<"max is "<<min(4,current_minibatch_size)<<endl; getchar(); for (int example = 0;example <min(4,current_minibatch_size) ;example++) { //checking the gradient of the normalization constant cout<<"the example is "<<example<<endl; vector<int> context;//(ngram_size-1); //creating the context for (int word = 0;word<ngram_size-1;word++) { context.push_back(shuffled_training_data[word](minibatch_start_index+example)); cout<<"context word "<<word<<" is "<<shuffled_training_data[word](minibatch_start_index+example)<<endl; //context.push_back((*thread_data_col_locations[thread_id][word])(minibatch_start_index+train_id)); } c_h[context] +=delta_perturb; double perturbed_nce_loss = 
computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples, unigram_probs_vector,D_prime); double finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb; double difference = finite_difference_gradient - c_h_gradient[context]; cout<<"the original gradient is "<<c_h_gradient[context]<<endl; cout<<"the finite difference gradient is "<<finite_difference_gradient<<endl; cout<<"the ratio is "<<c_h_gradient[context]/finite_difference_gradient<<endl; cout<<"the difference for c_h was "<<abs(difference)<<endl; c_h[context] -= delta_perturb; getchar(); //checking the gradient of the hidden layer to output node context matrix initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node); int row_perturb_dimension = rand() % n_hidden; cout<<"the row perturb dimension was "<<row_perturb_dimension<<endl; int col_perturb_dimension = rand() % embedding_dimension; cout<<"the col perturb dimension was "<<col_perturb_dimension<<endl; //first perturb //cout<<"before perturbation the dimension was "<< context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension)<<endl; hidden_layer_to_output_node.param->U(row_perturb_dimension,col_perturb_dimension) += delta_perturb; //cout<<"after perturbation the dimension was "<< context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension)<<endl; //then do fprop fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data); //then compute NCE loss function perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, 
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples, unigram_probs_vector,D_prime); finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb; difference = finite_difference_gradient - gradient_hidden_to_output_matrix(row_perturb_dimension,col_perturb_dimension); cout<<"the ratio is "<<gradient_hidden_to_output_matrix(row_perturb_dimension,col_perturb_dimension)/finite_difference_gradient<<endl; cout<<"the difference for hidden to output context matrix was "<<abs(difference)<<endl; hidden_layer_to_output_node.param->U(row_perturb_dimension,col_perturb_dimension) -= delta_perturb; getchar(); //restoring the fprop to the original one initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node); fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data); int example_id = rand() % current_minibatch_size; //now pick that example , perturb, do fprop and check the gradient Matrix<double,1,Dynamic> perturb_vector(embedding_dimension); int perturb_dimension = rand() % embedding_dimension; cout<<"the perturb dimension was "<<perturb_dimension<<endl; int output_word = shuffled_training_data[myParam.ngram_size-1](minibatch_start_index+example_id); cout<<"the output word was "<<output_word<<endl; D_prime.W(output_word,perturb_dimension) += delta_perturb; perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples, unigram_probs_vector,D_prime); //cout<<"the perturbed nce loss was "<<perturbed_nce_loss<<endl; //cout<<"the finite difference gradient of the perturb dimension "<<perturb_dimension<<" was 
"<<(perturbed_nce_loss-current_nce_loss)/delta_perturb<<endl; finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb; difference = finite_difference_gradient - gradient_output_W(output_word,perturb_dimension); cout<<"the ratio is "<<gradient_output_W(output_word,perturb_dimension)/finite_difference_gradient<<endl; cout<<"the difference for output W was "<<abs(difference)<<endl; D_prime.W(output_word,perturb_dimension) -= delta_perturb; getchar(); //now checking the gradient for output bias D_prime.b(output_word) += delta_perturb; perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples, unigram_probs_vector,D_prime); finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb; difference = finite_difference_gradient - gradient_output_b(output_word); cout<<"the ratio is "<<gradient_output_b(output_word)/finite_difference_gradient<<endl; cout<<"the difference for output b was "<<abs(difference)<<endl; D_prime.b(output_word) -= delta_perturb; getchar(); //checking the gradient for one of the words in the noise samples int noise_word_id = rand()%num_noise_samples; cout<<"the noise word id was "<<noise_word_id<<endl; int noise_word = minibatch_negative_samples(example_id,noise_word_id); cout<<"the noise word was "<<noise_word<<endl; D_prime.W(noise_word,perturb_dimension) += delta_perturb; perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples, unigram_probs_vector,D_prime); finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb; difference = finite_difference_gradient - 
gradient_output_W(noise_word,perturb_dimension); cout<<"the ratio is "<<gradient_output_W(noise_word,perturb_dimension)/finite_difference_gradient<<endl; cout<<"the difference for output noise W was "<<abs(difference)<<endl; D_prime.W(noise_word,perturb_dimension) -= delta_perturb; getchar(); //now checking the gradient for hbias //cout<<"gradient h bias is "<<gradient_h_bias<<endl; initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node); perturb_dimension = rand() % n_hidden; //cout<<"the perturb dimension is "<<perturb_dimension<<endl; //out<<"h bias before perturbing is "<<hidden_layer_node.param->h_bias<<endl; hidden_layer_node.param->h_bias(perturb_dimension) += delta_perturb; //cout<<"h bias after perturbing is "<<hidden_layer_node.param->h_bias<<endl; fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data); perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples, unigram_probs_vector,D_prime); cout<<"the perturbed loss was "<<perturbed_nce_loss<<endl; finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/ delta_perturb; cout<<"the finited difference gradient for h bias was "<<finite_difference_gradient<<endl; difference = finite_difference_gradient -gradient_h_bias(perturb_dimension); cout<<"the ratio is "<<gradient_h_bias(perturb_dimension)/finite_difference_gradient<<endl; cout<<"the difference for h_bias was was "<<abs(difference)<<endl; hidden_layer_node.param->h_bias(perturb_dimension) -= delta_perturb; getchar(); for (int word = 0;word<myParam.ngram_size-1;word++) { cout<<"the word is "<<word<<endl; for (int num_perturb = 0;num_perturb<3;num_perturb++) { 
initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node); perturb_dimension = rand() % embedding_dimension; cout<<"the perturb dimension was "<<perturb_dimension<<endl; //first perturb int input_word = shuffled_training_data[word](minibatch_start_index+example_id); //cout<<"the input word was "<<input_word<<endl; //cout<<"before perturbation the dimension was "<< word_nodes[word].param->W(input_word,perturb_dimension)<<endl; word_nodes[word].param->W(input_word,perturb_dimension) += delta_perturb; //cout<<"after perturbation the dimension was "<< word_nodes[word].param->W(input_word,perturb_dimension)<<endl; //then do fprop fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data); //then compute NCE loss function double perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples, unigram_probs_vector,D_prime); //cout<<"the perturbed nce loss was "<<perturbed_nce_loss<<endl; finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb; difference = finite_difference_gradient - gradient_input_W(input_word,perturb_dimension); cout<<"the ratio is "<<gradient_input_W(input_word,perturb_dimension)/finite_difference_gradient<<endl; cout<<"the difference for input W was was "<<abs(difference)<<endl; //cout<<"the finite difference gradient of the perturb dimension "<<perturb_dimension<<" was "<<(perturbed_nce_loss-current_nce_loss)/delta_perturb<<endl; //cout<<"the gradient was "<<gradient_input_W(input_word,perturb_dimension); //unpurturbing word_nodes[word].param->W(input_word,perturb_dimension)-= delta_perturb; if (abs(difference) > 10E-6) { cout<<"the difference was greater than 10E-6 and the original paramter was 
"<<word_nodes[word].param->W(input_word,perturb_dimension)<<endl; } getchar(); } //now perturbing the U matrices and checking gradients via finite differences for (int num_perturb = 0;num_perturb<3;num_perturb++) { initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node); int row_perturb_dimension = rand() % n_hidden; cout<<"the row perturb dimension was "<<row_perturb_dimension<<endl; int col_perturb_dimension = rand() % embedding_dimension; cout<<"the col perturb dimension was "<<col_perturb_dimension<<endl; //first perturb //cout<<"before perturbation the dimension was "<< context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension)<<endl; context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension) += delta_perturb; //cout<<"after perturbation the dimension was "<< context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension)<<endl; //then do fprop fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data); //then compute NCE loss function double perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node, hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples, unigram_probs_vector,D_prime); finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb; difference = finite_difference_gradient - gradients_context_matrix[word](row_perturb_dimension,col_perturb_dimension); cout<<"the ratio is "<<gradients_context_matrix[word](row_perturb_dimension,col_perturb_dimension)/finite_difference_gradient<<endl; cout<<"the difference for context matrix was "<<abs(difference)<<endl; //cout<<"the perturbed nce loss was "<<perturbed_nce_loss<<endl; //cout<<"the finite difference gradient of the perturb dimension "<<perturb_dimension<<" was 
"<<(perturbed_nce_loss-current_nce_loss)/delta_perturb<<endl; //cout<<"the gradient was "<<gradients_context_matrix[word](row_perturb_dimension,col_perturb_dimension); //unpurturbing context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension) -= delta_perturb; getchar(); } } } initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node); } void initZero(param & myParam,vector<word_node > &word_nodes,vector<context_node > &context_nodes,hidden_node &hidden_layer_node, context_node & hidden_layer_to_output_node) { //for all the nodes in the graph, I have to set the bprop and fprop matrices to zero hidden_layer_node.bProp_matrix.setZero(); hidden_layer_node.fProp_matrix.setZero(); hidden_layer_to_output_node.fProp_matrix.setZero(); hidden_layer_to_output_node.bProp_matrix.setZero(); for (int word = 0;word<myParam.ngram_size-1;word++) { context_nodes[word].fProp_matrix.setZero(); context_nodes[word].bProp_matrix.setZero(); word_nodes[word].fProp_matrix.setZero(); //word_nodes[word].bProp_matrix.setZero(); } } double computeLossFunction(param & myParam,int minibatch_start_index,int current_minibatch_size,vector<word_node > &word_nodes, vector<context_node > &context_nodes,hidden_node &hidden_layer_node,context_node & hidden_layer_to_output_node, vector<Matrix<int,Dynamic,1> >&shuffled_training_data,vector_map & c_h,Matrix<double,Dynamic,Dynamic> & minibatch_predicted_embeddings, Matrix<int,Dynamic,Dynamic> & minibatch_negative_samples,vector<vector<double> > &unigram_probs_vector,Output_word_embeddings & D_prime) { std::setprecision(9); int ngram_size = myParam.ngram_size; int n_vocab = myParam.n_vocab; int embedding_dimension = myParam.embedding_dimension; int minibatch_size = myParam.minibatch_size; int num_noise_samples = myParam.num_noise_samples; double normalization_init = myParam.normalization_init; //parallelizing the creation with multithreading Eigen::initParallel(); Eigen::setNbThreads(1); double minibatch_loss; 
// NOTE(review): minibatch_loss above is never initialized before being used
// as the target of the OpenMP reduction(+:minibatch_loss) below — the final
// sum adds the reduction result onto an indeterminate value (undefined
// behavior). It almost certainly should start at 0.0; confirm and fix.
#pragma omp parallel firstprivate(minibatch_size,minibatch_start_index,ngram_size,n_vocab,embedding_dimension, \ num_noise_samples,normalization_init) { #pragma omp for reduction(+:minibatch_loss) //schedule(dynamic) for (int train_id = 0;train_id < minibatch_size;train_id++) { int thread_id = omp_get_thread_num(); int output_word = shuffled_training_data[ngram_size-1](minibatch_start_index+train_id); //cout<<"output word is "<<output_word<<endl; Matrix<double,Dynamic,1> predicted_embedding = minibatch_predicted_embeddings.col(train_id); //cout<<"predicted embedding is "<<endl<<predicted_embedding<<endl; vector<int> context;//(ngram_size-1); //creating the context for (int word = 0;word<ngram_size-1;word++) { context.push_back(shuffled_training_data[word](minibatch_start_index+train_id)); //context.push_back((*thread_data_col_locations[thread_id][word])(minibatch_start_index+train_id)); } double log_inv_normalization_const_h = 0.; //getting a normalization constant and making it threasafe //this region does not need to be critical because its just a read log_inv_normalization_const_h = c_h[context]; double inv_normalization_const_h = exp(log_inv_normalization_const_h); //cout<<"The normalization constant is "<<inv_normalization_const_h<<endl; //setting the gradient for that context to 0; //double c_h_gradient_vector = 0.0; double score = D_prime.W.row(output_word).dot(predicted_embedding) + D_prime.b(output_word); //cout<<"the positive score is "<<score<<endl; double unnorm_positive_prob = exp(score); //cout<<"the unnorm positive prob is "<<unnorm_positive_prob<<endl; //cout<<"the unigram prob is "<<unigram_probs_vector[thread_id][output_word]<<endl; //cout<<"the positive prob is "<< double sample_loss = 0.; double positive_prob = (unnorm_positive_prob*inv_normalization_const_h/ (unnorm_positive_prob*inv_normalization_const_h + num_noise_samples * unigram_probs_vector[thread_id][output_word])); //cout<<"positive prob is "<<positive_prob<<endl; sample_loss = 
log(positive_prob); //cout<<"sample loss is "<<sample_loss<<endl; ///COMPUTING NOISE SAMPLES/// for (int sample_id = 0;sample_id <num_noise_samples;sample_id++) { int sample = minibatch_negative_samples(train_id,sample_id); assert (sample >= 0); double negative_score = D_prime.W.row(sample).dot(predicted_embedding) + D_prime.b(sample); //cout<<"the negative score is "<<negative_score<<endl; //cout<<"the sample was "<<sample<<endl; double negative_unnorm_prob = exp(negative_score); double negative_prob = num_noise_samples*unigram_probs_vector[thread_id][sample]/ (negative_unnorm_prob*inv_normalization_const_h + num_noise_samples * unigram_probs_vector[thread_id][sample]); //cout<<"negative prob is "<<negative_prob<<endl; sample_loss += log(negative_prob); } //cout<<"the sample loss is "<<sample_loss<<endl; minibatch_loss += sample_loss; } } #pragma omp barrier return(minibatch_loss); } void inline fPropGradCheck(param & myParam,int minibatch_start_index,int current_minibatch_size,vector<word_node > &word_nodes, vector<context_node > &context_nodes,hidden_node &hidden_layer_node,context_node & hidden_layer_to_output_node, vector<Matrix<int,Dynamic,1> >&data) { /////FORWARD PROPAGATION///////////// //doing forward propagation first with word nodes for (int word = 0;word<myParam.ngram_size-1;word++) { word_nodes[word].param->fPropOmp(data[word],word_nodes[word].fProp_matrix,minibatch_start_index,current_minibatch_size); } Eigen::setNbThreads(myParam.n_threads); //doing forward prop with the context nodes for (int word = 0;word<myParam.ngram_size-1;word++) { context_nodes[word].param->fProp(word_nodes[word].fProp_matrix,context_nodes[word].fProp_matrix); //cout<<"context fprop matrix was "<<context_nodes[word].fProp_matrix<<endl; } //doing forward prop with the hidden nodes hidden_layer_node.param->fPropTanh(context_nodes,hidden_layer_node.fProp_matrix); /* Matrix<double,Dynamic,Dynamic> hidden_layer_input(myParam.n_hidden,myParam.embedding_dimension); for (int word = 
0;word<myParam.ngram_size-1;word++) { hidden_layer_node.param->fPropTanh(context_nodes[word].fProp_matrix,hidden_layer_node.fProp_matrix); } */ //cout<<"the hidden fprop matrix was "<<hidden_layer_node.fProp_matrix<<endl; //now doing forward prop with the hidden to output nodes hidden_layer_to_output_node.param->fProp(hidden_layer_node.fProp_matrix,hidden_layer_to_output_node.fProp_matrix); //cout<<"the hidden to output fprop matrix was "<<hidden_layer_to_output_node.fProp_matrix<<endl; }
trmv_x_sky_u_hi_trans.c
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * y := beta * y + alpha * A^T * x for a unit-diagonal triangular matrix A
 * stored in skyline (SKY) format ("u_hi_trans": unit diagonal, upper
 * triangle, transposed).
 *
 * Returns ALPHA_SPARSE_STATUS_INVALID_VALUE when A is not square,
 * ALPHA_SPARSE_STATUS_SUCCESS otherwise.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* Triangular kernels only make sense for square matrices. */
    if(m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT thread_num = alpha_get_thread_num();

    /* First pass: y = beta * y. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
    }

    /* Second pass: accumulate alpha * A^T * x.  Each iteration r writes
       only y[r], so the loop parallelizes without synchronization. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for(ALPHA_INT r = 0; r < m; ++r)
    {
        const ALPHA_INT row_start = A->pointers[r];
        const ALPHA_INT row_end = A->pointers[r + 1];
        for(ALPHA_INT i = row_start; i < row_end; i++)
        {
            /* Skyline entries for r are contiguous and the last one sits
               on the diagonal, so entry i maps to column
               c = r - (row_end - 1 - i).  This is the hoisted form of the
               original r - row_eles + row_indx (row_eles and the running
               row_indx counter were recomputed every iteration). */
            const ALPHA_INT c = r + i + 1 - row_end;
            if(i == row_end - 1)
            {
                /* Unit diagonal: the stored value is ignored and the
                   implicit diagonal entry 1 is used. */
                alpha_madde(y[r], alpha, x[c]);
            }
            else
            {
                ALPHA_Number t;
                alpha_mul(t, alpha, A->values[i]);
                alpha_madde(y[r], t, x[c]);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point: dispatches to the OpenMP implementation. */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_SKY *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
gradfm_adj_mex.c
#include <inttypes.h> #include <omp.h> #include "mex.h" void gradfm_adjf(float *du, const float *x, const float *y, const float *z, const uint8_t *G, const double *h, const size_t *sz); void gradfm_adjd(double *du, const double *x, const double *y, const double *z, const uint8_t *G, const double *h, const size_t *sz); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if ((nrhs != 6) || (nlhs > 1)) { mexErrMsgTxt("Usage: gradfm_adj_mex(du, x, y, z, G, h);"); return; } const uint8_t *G = (const uint8_t *)mxGetData(prhs[4]); const double *h = (const double *)mxGetData(prhs[5]); const size_t *sz = (const size_t *)mxGetDimensions(prhs[0]); if (mxIsSingle(prhs[0])) { float *du = (float *)mxGetData(prhs[0]); const float *x = (const float *)mxGetData(prhs[1]); const float *y = (const float *)mxGetData(prhs[2]); const float *z = (const float *)mxGetData(prhs[3]); gradfm_adjf(du, x, y, z, G, h, sz); } else { double *du = (double *)mxGetData(prhs[0]); const double *x = (const double *)mxGetData(prhs[1]); const double *y = (const double *)mxGetData(prhs[2]); const double *z = (const double *)mxGetData(prhs[3]); gradfm_adjd(du, x, y, z, G, h, sz); } if (nlhs == 1) { plhs[0] = mxCreateDoubleScalar(1.0); } return; } void gradfm_adjf(float *du, const float *x, const float *y, const float *z, const uint8_t *G, const double *h, const size_t *sz) { size_t i, j, k; size_t l; float dx, dy, dz; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t nxnynz = nx*ny*nz; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t NZ = nxny*(nz-1); const float hx = (float)(-1.0/h[0]); const float hy = (float)(-1.0/h[1]); const float hz = (float)(-1.0/h[2]); #pragma omp parallel for private(i,j,k,l) schedule(static) \ if(nxnynz > 16*16*16) for(k = 0; k < nxnynz; k += nxny) { for(j = 0; j < nxny; j += nx) { l = j + k; for(i = 0; i < nx; ++i, ++l) { if (G[l]) { dz = (k > 0) && G[l-nxny] ? 
hz*(z[l]-z[l-nxny]) : (k < NZ) && G[l+nxny] ? hz*(z[l+nxny]-z[l]) : 0.0f; dy = (j > 0) && G[l-nx] ? hy*(y[l]-y[l-nx]) : (j < NY) && G[l+nx] ? hy*(y[l+nx]-y[l]) : 0.0f; dx = (i > 0) && G[l-1] ? hx*(x[l]-x[l-1]) : (i < NX) && G[l+1] ? hx*(x[l+1]-x[l]) : 0.0f; du[l] = dx + dy + dz; } } } } return; } void gradfm_adjd(double *du, const double *x, const double *y, const double *z, const uint8_t *G, const double *h, const size_t *sz) { size_t i, j, k; size_t l; double dx, dy, dz; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t nxnynz = nx*ny*nz; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t NZ = nxny*(nz-1); const double hx = -1.0/h[0]; const double hy = -1.0/h[1]; const double hz = -1.0/h[2]; #pragma omp parallel for private(i,j,k,l) schedule(static) \ if(nxnynz > 16*16*16) for(k = 0; k < nxnynz; k += nxny) { for(j = 0; j < nxny; j += nx) { l = j + k; for(i = 0; i < nx; ++i, ++l) { if (G[l]) { dz = (k > 0) && G[l-nxny] ? hz*(z[l]-z[l-nxny]) : (k < NZ) && G[l+nxny] ? hz*(z[l+nxny]-z[l]) : 0.0; dy = (j > 0) && G[l-nx] ? hy*(y[l]-y[l-nx]) : (j < NY) && G[l+nx] ? hy*(y[l+nx]-y[l]) : 0.0; dx = (i > 0) && G[l-1] ? hx*(x[l]-x[l-1]) : (i < NX) && G[l+1] ? hx*(x[l+1]-x[l]) : 0.0; du[l] = dx + dy + dz; } } } } return; }
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===--------------------------------------------------------------------===// // ExprIterator - Iterators for iterating over Stmt* arrays that contain // only Expr*. This is needed because AST nodes use Stmt* arrays to store // references to children (to be compatible with StmtIterator). 
//===--------------------------------------------------------------------===//

class Stmt;
class Expr;

/// ExprIterator - Iterates over a Stmt* array, presenting each element as an
/// Expr*.  operator*/operator->/operator[] are defined out of line (they
/// perform the Stmt* -> Expr* cast).
class ExprIterator {
  Stmt** I;
public:
  ExprIterator(Stmt** i) : I(i) {}
  ExprIterator() : I(nullptr) {}
  ExprIterator& operator++() { ++I; return *this; }
  // Const-qualified for consistency with ConstExprIterator: neither
  // arithmetic operator modifies *this.
  ExprIterator operator-(size_t i) const { return I-i; }
  ExprIterator operator+(size_t i) const { return I+i; }
  Expr* operator[](size_t idx);
  // FIXME: Verify that this will correctly return a signed distance.
  signed operator-(const ExprIterator& R) const { return I - R.I; }
  Expr* operator*() const;
  Expr* operator->() const;
  bool operator==(const ExprIterator& R) const { return I == R.I; }
  bool operator!=(const ExprIterator& R) const { return I != R.I; }
  bool operator>(const ExprIterator& R) const { return I > R.I; }
  bool operator>=(const ExprIterator& R) const { return I >= R.I; }
};

/// ConstExprIterator - Const counterpart of ExprIterator over a
/// const Stmt* array.
class ConstExprIterator {
  const Stmt * const *I;
public:
  ConstExprIterator(const Stmt * const *i) : I(i) {}
  ConstExprIterator() : I(nullptr) {}
  ConstExprIterator& operator++() { ++I; return *this; }
  ConstExprIterator operator+(size_t i) const { return I+i; }
  ConstExprIterator operator-(size_t i) const { return I-i; }
  const Expr * operator[](size_t idx) const;
  signed operator-(const ConstExprIterator& R) const { return I - R.I; }
  const Expr * operator*() const;
  const Expr * operator->() const;
  bool operator==(const ConstExprIterator& R) const { return I == R.I; }
  bool operator!=(const ConstExprIterator& R) const { return I != R.I; }
  bool operator>(const ConstExprIterator& R) const { return I > R.I; }
  bool operator>=(const ConstExprIterator& R) const { return I >= R.I; }
};

//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//

/// Stmt - This represents one statement.
/// class Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void* operator new(size_t bytes) throw() { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void* data) throw() { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { 
NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingLocal : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. 
This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { // FIXME: this is wasteful on 64-bit platforms. void *Aligner; StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. 
void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void* operator new(size_t bytes, void* mem) throw() { return mem; } void operator delete(void*, const ASTContext&, unsigned) throw() { } void operator delete(void*, const ASTContext*, unsigned) throw() { } void operator delete(void*, size_t) throw() { } void operator delete(void*, void*) throw() { } public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; private: /// \brief Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) { StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } public: Stmt(StmtClass SC) { StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getLocStart() const LLVM_READONLY; SourceLocation getLocEnd() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \brief Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; /// dumpColor - same as dump(), but forces color highlighting. 
void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); /// \brief Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. typedef StmtIterator child_iterator; typedef ConstStmtIterator const_child_iterator; typedef StmtRange child_range; typedef ConstStmtRange const_child_range; child_range children(); const_child_range children() const { return const_cast<Stmt*>(this)->children(); } child_iterator child_begin() { return children().first; } child_iterator child_end() { return children().second; } const_child_iterator child_begin() const { return children().first; } const_child_iterator child_end() const { return children().second; } /// \brief Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. 
/// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. /// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. 
child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; typedef llvm::iterator_range<decl_iterator> decl_range; typedef llvm::iterator_range<const_decl_iterator> decl_const_range; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. 
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(); } friend class ASTStmtReader; friend class ASTStmtWriter; }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. /// class CompoundStmt : public Stmt { Stmt** Body; SourceLocation LBraceLoc, RBraceLoc; friend class ASTStmtReader; public: CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, SourceLocation LB, SourceLocation RB); // \brief Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // \brief Build an empty compound statement. explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty), Body(nullptr) { CompoundStmtBits.NumStmts = 0; } void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } typedef Stmt** body_iterator; typedef llvm::iterator_range<body_iterator> body_range; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return Body; } body_iterator body_end() { return Body + size(); } Stmt *body_back() { return !body_empty() ? 
Body[size()-1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); Body[size()-1] = S; } typedef Stmt* const * const_body_iterator; typedef llvm::iterator_range<const_body_iterator> body_const_range; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return Body; } const_body_iterator body_end() const { return Body + size(); } const Stmt *body_back() const { return !body_empty() ? Body[size() - 1] : nullptr; } typedef std::reverse_iterator<body_iterator> reverse_body_iterator; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } typedef std::reverse_iterator<const_body_iterator> const_reverse_body_iterator; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; } SourceLocation getLBracLoc() const { return LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(Body, Body + CompoundStmtBits.NumStmts); } const_child_range children() const { return child_range(Body, Body + CompoundStmtBits.NumStmts); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: // A pointer to the following CaseStmt or DefaultStmt class, // used by SwitchStmt. 
SwitchCase *NextSwitchCase; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) { } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension SourceLocation EllipsisLoc; public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = nullptr; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// \brief Build an empty switch case statement. 
explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { LabelDecl *TheDecl; Stmt *SubStmt; SourceLocation IdentLoc; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt), IdentLoc(IL) { } // \brief Build an empty label statement. 
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *)); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *)); } Attr *const *getAttrArrayPtr() const { return reinterpret_cast<Attr *const *>(this + 1); } Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. 
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. /// class IfStmt : public Stmt { enum { VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// \brief Build an empty if/then/else statement explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } const Stmt *getThen() const { return SubExprs[THEN]; } void setThen(Stmt *S) { SubExprs[THEN] = S; } const Stmt *getElse() const { return SubExprs[ELSE]; } void setElse(Stmt *S) { SubExprs[ELSE] = S; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Stmt *getThen() { return SubExprs[THEN]; } Stmt *getElse() { return SubExprs[ELSE]; } SourceLocation getIfLoc() const { return IfLoc; } void setIfLoc(SourceLocation L) { IfLoc = L; } SourceLocation getElseLoc() const { return ElseLoc; } void setElseLoc(SourceLocation L) { ElseLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; } SourceLocation getLocEnd() const LLVM_READONLY { if (SubExprs[ELSE]) return SubExprs[ELSE]->getLocEnd(); else return SubExprs[THEN]->getLocEnd(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. /// class SwitchStmt : public Stmt { enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // This points to a linked list of case and default statements. SwitchCase *FirstCase; SourceLocation SwitchLoc; /// If the SwitchStmt is a switch on an enum value, this records whether /// all the enum values were covered by CaseStmts. This value is meant to /// be a hint for possible clients. unsigned AllEnumCasesCovered : 1; public: SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond); /// \brief Build a empty switch statement. 
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase; } /// \brief Set the case list for this switch statement. void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { AllEnumCasesCovered = 1; } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. 
bool isAllEnumCasesCovered() const { return (bool) AllEnumCasesCovered; } SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. /// class WhileStmt : public Stmt { enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; public: WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// \brief Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. /// class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation DoLoc; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// \brief Build an empty do-while statement. 
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. /// class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation ForLoc; SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// \brief Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { } Stmt *getInit() { return SubExprs[INIT]; } /// \brief Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. 
///
class GotoStmt : public Stmt {
  LabelDecl *Label;          // The label being jumped to.
  SourceLocation GotoLoc;    // Location of the 'goto' keyword.
  SourceLocation LabelLoc;   // Location of the label name.
public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
    : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// \brief Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(); }
};

/// IndirectGotoStmt - This represents an indirect goto.
///
class IndirectGotoStmt : public Stmt {
  SourceLocation GotoLoc;   // Location of the 'goto' keyword.
  SourceLocation StarLoc;   // Location of the '*' before the target.
  Stmt *Target;             // Expression computing the jump target.
public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
    : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
      Target((Stmt*)target) {}

  /// \brief Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
    : Stmt(IndirectGotoStmtClass, Empty) { }

  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
  const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target+1); }
};

/// ContinueStmt - This represents a continue.
///
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc;   // Location of the 'continue' keyword.
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators
  child_range children() { return child_range(); }
};

/// BreakStmt - This represents a break.
///
class BreakStmt : public Stmt {
  SourceLocation BreakLoc;   // Location of the 'break' keyword.
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {}

  /// \brief Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators
  child_range children() { return child_range(); }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
  Stmt *RetExpr;                  // Returned expression, or null for 'return;'.
  SourceLocation RetLoc;          // Location of the 'return' keyword.
  const VarDecl *NRVOCandidate;   // Variable that might be used for the named
                                  // return value optimization, if any.
public:
  ReturnStmt(SourceLocation RL) : Stmt(ReturnStmtClass), RetExpr(nullptr),
                                  RetLoc(RL), NRVOCandidate(nullptr) {}

  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
    : Stmt(ReturnStmtClass), RetExpr((Stmt*) E), RetLoc(RL),
      NRVOCandidate(NRVOCandidate) {}

  /// \brief Build an empty return expression.
  explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }

  const Expr *getRetValue() const;
  Expr *getRetValue();
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }

  SourceLocation getReturnLoc() const { return RetLoc; }
  void setReturnLoc(SourceLocation L) { RetLoc = L; }

  /// \brief Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. 
typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm::iterator_range<inputs_iterator> inputs_range; typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm::iterator_range<outputs_iterator> outputs_range; typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. 
StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) { } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. /// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation 
getLocStart() const LLVM_READONLY { return getExceptLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents a __finally handler of a Structured Exception Handling
/// __try statement.
class SEHFinallyStmt : public Stmt {
  SourceLocation Loc;   // Location of the '__finally' keyword.
  Stmt *Block;          // The compound statement forming the handler body.

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;

  /// \brief Build an empty __finally statement (used by deserialization).
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }

public:
  static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getLocEnd(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a Structured Exception Handling __try statement (or a C++
/// 'try' — see IsCXXTry) together with its __except/__finally handler.
class SEHTryStmt : public Stmt {
  bool IsCXXTry;           // True for 'try', false for '__try'.
  SourceLocation TryLoc;   // Location of the 'try'/'__try' keyword.
  Stmt *Children[2];       // [TRY] = try block, [HANDLER] = handler.
  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  friend class ASTReader;
  friend class ASTStmtReader;

  /// \brief Build an empty __try statement (used by deserialization).
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
///
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;   // Location of the '__leave' keyword.
public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// \brief Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { }

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators
  child_range children() { return child_range(); }
};

/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// \brief The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_VLAType,
  };

  /// \brief Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr) : VarAndKind(Var, Kind), Loc(Loc) { switch (Kind) { case VCK_This: assert(!Var && "'this' capture cannot have a variable!"); break; case VCK_ByRef: assert(Var && "capturing by reference must have a variable!"); break; case VCK_VLAType: assert(!Var && "Variable-length array type capture cannot have a variable!"); break; } } /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable. bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// \brief Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const { assert(capturesVariable() && "No variable available for 'this' or VAT capture"); return VarAndKind.getPointer(); } friend class ASTStmtReader; }; private: /// \brief The number of variable captured, including 'this'. 
unsigned NumCaptures; /// \brief The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() const { return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return const_cast<CapturedStmt *>(this)->getCapturedStmt(); } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return const_cast<CapturedStmt *>(this)->getCapturedDecl(); } /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. 
void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; capture_init_range capture_inits() const { return capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr **>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
step.c
/*---------------------------------------------------------------------------------
 *
 * STEP.C
 *
 * -Advances simulation by one timestep
 *
 *---------------------------------------------------------------------------------*/

#include "decs.h"

#include <stdio.h>
#include <stdlib.h>

// Advance the fluid variables: starting from state Si, evaluate fluxes and
// sources at state Ss, and write the updated state into Sf over timestep Dt.
// Returns the next timestep estimate produced by the flux calculation.
double advance_fluid(struct GridGeom *G, struct FluidState *Si,
  struct FluidState *Ss, struct FluidState *Sf, double Dt);

// Advance the simulation state S by one full timestep dt using a
// predictor-corrector scheme (half step to Stmp, then full step on S).
void step(struct GridGeom *G, struct FluidState *S)
{
  static struct FluidState *Stmp;
  static struct FluidState *Ssave;

  static int first_call = 1;
  if (first_call) {
    // Scratch states are allocated once and reused for the whole run.
    // FIX(review): calloc results were previously unchecked; dereferencing a
    // NULL pointer on allocation failure is undefined behavior (CERT MEM32-C).
    Stmp = calloc(1, sizeof(struct FluidState));
    Ssave = calloc(1, sizeof(struct FluidState));
    if (Stmp == NULL || Ssave == NULL) {
      fprintf(stderr, "step(): failed to allocate scratch fluid states\n");
      exit(EXIT_FAILURE);
    }
    first_call = 0;
  }

  // Need both P_n and P_n+1 to calculate current
  // Work around ICC 18.0.2 bug in assigning to pointers to structs
#if INTEL_WORKAROUND
  memcpy(&(Ssave->P), &(S->P), sizeof(GridPrim));
#else
#pragma omp parallel for simd collapse(2)
  PLOOP ZLOOPALL Ssave->P[ip][j][i] = S->P[ip][j][i];
#endif

  LOGN("Step %d", nstep);
  FLAG("Start step");

  // Predictor setup: half-step advance to obtain the time-centered state Stmp
  advance_fluid(G, S, S, Stmp, 0.5*dt);
  FLAG("Advance Fluid Tmp");

#if ELECTRONS
  heat_electrons(G, S, Stmp);
  FLAG("Heat Electrons Tmp");
#endif

  // Fixup routines: smooth over outlier zones
  fixup(G, Stmp);
  FLAG("Fixup Tmp");
#if ELECTRONS
  fixup_electrons(Stmp);
  FLAG("Fixup e- Tmp");
#endif
  set_bounds(G, Stmp);
  FLAG("First bounds Tmp");
  fixup_utoprim(G, Stmp);
  FLAG("Fixup U_to_P Tmp");
  set_bounds(G, Stmp);
  FLAG("Second bounds Tmp");

  // Corrector step: full-dt advance of S using fluxes evaluated at Stmp
  double ndt = advance_fluid(G, S, Stmp, S, dt);
  FLAG("Advance Fluid Full");

#if ELECTRONS
  heat_electrons(G, Stmp, S);
  FLAG("Heat Electrons Full");
#endif

  fixup(G, S);
  FLAG("Fixup Full");
#if ELECTRONS
  fixup_electrons(S);
  FLAG("Fixup e- Full");
#endif
  set_bounds(G, S);
  FLAG("First bounds Full");
  fixup_utoprim(G, S);
  FLAG("Fixup U_to_P Full");
  set_bounds(G, S);
  FLAG("Second bounds Full");

  // Increment time
  t += dt;

  // If we're dumping this step, update the current
  if (t >= tdump) {
    current_calc(G, S, Ssave, dt);
  }

  // Set next timestep, limiting growth to a factor of SAFE per step
  if (ndt > SAFE * dt) {
    dt = SAFE * dt;
  } else {
    dt = ndt;
  }
}

inline double advance_fluid(struct GridGeom *G, struct FluidState *Si,
  struct FluidState *Ss, struct FluidState *Sf, double Dt)
{
  static GridPrim *dU;
  static struct FluidFlux *F;

  static int firstc = 1;
  if (firstc) {
    // FIX(review): check allocations before first use (CERT MEM32-C)
    dU = calloc(1, sizeof(GridPrim));
    F = calloc(1, sizeof(struct FluidFlux));
    if (dU == NULL || F == NULL) {
      fprintf(stderr, "advance_fluid(): failed to allocate work arrays\n");
      exit(EXIT_FAILURE);
    }
    firstc = 0;
  }

  // Seed Sf with the initial primitives; U_to_P below refines them in place
  // Work around ICC 18.0.2 bug in assigning to pointers to structs
#if INTEL_WORKAROUND
  memcpy(&(Sf->P), &(Si->P), sizeof(GridPrim));
#else
#pragma omp parallel for simd collapse(2)
  PLOOP ZLOOPALL Sf->P[ip][j][i] = Si->P[ip][j][i];
#endif

  double ndt = get_flux(G, Ss, F);

#if METRIC == MKS
  fix_flux(F);
#endif

  // Constrained transport for B
  flux_ct(F);

  // Flux diagnostic globals
  diag_flux(F);

  // Update Si to Sf
  timer_start(TIMER_UPDATE_U);
  get_state_vec(G, Ss, CENT, 0, N2 - 1, 0, N1 - 1);
  get_fluid_source(G, Ss, dU);

  get_state_vec(G, Si, CENT, 0, N2 - 1, 0, N1 - 1);
  prim_to_flux_vec(G, Si, 0, CENT, 0, N2 - 1, 0, N1 - 1, Si->U);

  // Finite-volume update: U_new = U_old + Dt*(flux divergence + source)
#pragma omp parallel for collapse(2)
  PLOOP ZLOOP {
    Sf->U[ip][j][i] = Si->U[ip][j][i] +
      Dt*((F->X1[ip][j][i] - F->X1[ip][j][i+1])/dx[1] +
          (F->X2[ip][j][i] - F->X2[ip][j+1][i])/dx[2] +
          (*dU)[ip][j][i]);
  }
  timer_stop(TIMER_UPDATE_U);

  // Recover primitive variables; record per-zone inversion failure flags
  timer_start(TIMER_U_TO_P);
#pragma omp parallel for collapse(2)
  ZLOOP {
    pflag[j][i] = U_to_P(G, Sf, i, j, CENT);
  }
  timer_stop(TIMER_U_TO_P);

#pragma omp parallel for simd
  ZLOOPALL fail_save[j][i] = pflag[j][i];

  return ndt;
}
bf16_vec_kernel.h
#include "vec_type_cvt.h"

#if defined(CPU_CAPABILITY_AVX512)
#include <immintrin.h>
#else
#include "csrc/cpu/vec512/ref/add_ker.h"
#include "csrc/cpu/vec512/ref/mov_ker.h"
using namespace torch_ipex::cpu::kernel;
#endif

#if defined(CPU_CAPABILITY_AVX512)
// Reassemble 16 fp32 lanes from "split" storage: `top` holds the high 16 bits
// of each fp32 value, `bot` the low 16 bits.
inline __m512 pack_bf16_to_fp32(const __m256i top, const __m256i bot) {
  auto x1 = _mm512_cvtepu16_epi32(top);
  auto x2 = _mm512_cvtepu16_epi32(bot);
  // Shift high halves into position and combine with the low halves.
  auto y = _mm512_add_epi32(_mm512_bslli_epi128(x1, 2), x2);
  return _mm512_castsi512_ps(y);
}
#endif

// Only support AVX512 impl at current stage. Will expand this impl to cover
// AVX2 and other cases.
// Computes a += alpha * b, where a is stored as a split-fp32 pair:
// a1 holds the high 16 bits and a2 the low 16 bits of each fp32 value.
inline void packed_bf16_add_ker(
    at::BFloat16* a1,
    at::BFloat16* a2,
    at::BFloat16* b,
    int len,
    float alpha) {
#if defined(CPU_CAPABILITY_AVX512)
  auto vAlpha = _mm512_set1_ps(alpha);
  int i = 0;
  for (; i < len - 15; i += 16) {
    auto x1 = _mm256_loadu_si256((__m256i*)(a1 + i));
    auto x2 = _mm256_loadu_si256((__m256i*)(a2 + i));
    auto y1 = _mm256_loadu_si256((__m256i*)(b + i));
    auto z1 = pack_bf16_to_fp32(x1, x2);
    auto z2 = cvt_bf16_to_fp32(y1);
    z1 = _mm512_fmadd_ps(vAlpha, z2, z1);
    // Update result back to split input tensors.
    _mm256_storeu_si256((__m256i*)(a1 + i), trunc_fp32_to_bf16(z1));
    _mm256_storeu_si256(
        (__m256i*)(a2 + i), _mm512_cvtepi32_epi16(_mm512_castps_si512(z1)));
  }
  if (i < len) {
    // Masked tail: process the final (len % 16) elements.
    __mmask16 mask = (1 << (len - i)) - 1;
    auto x1 = _mm256_maskz_loadu_epi16(mask, a1 + i);
    auto x2 = _mm256_maskz_loadu_epi16(mask, a2 + i);
    auto y1 = _mm256_maskz_loadu_epi16(mask, b + i);
    auto z1 = pack_bf16_to_fp32(x1, x2);
    auto z2 = cvt_bf16_to_fp32(y1);
    z1 = _mm512_fmadd_ps(vAlpha, z2, z1);
    // Update result back to split input tensors.
    _mm256_mask_storeu_epi16(a1 + i, mask, trunc_fp32_to_bf16(z1));
    _mm256_mask_storeu_epi16(
        a2 + i, mask, _mm512_cvtepi32_epi16(_mm512_castps_si512(z1)));
  }
#else
  // Scalar fallback: rebuild each fp32 from its two 16-bit halves.
  // NOTE(review): the pointer casts below (*(float*)&merge, etc.) are a
  // strict-aliasing type pun; memcpy would be the portable form -- confirm
  // compiler settings before relying on this elsewhere.
  for (int i = 0; i < len; i++) {
    uint32_t hi = (a1 + i)->x;
    uint32_t lo = (a2 + i)->x;
    uint32_t merge = hi << 16 | lo;
    float a_val = *((float*)&merge);
    float b_val = *(b + i);
    float res = a_val + b_val * alpha;
    (a1 + i)->x = (uint16_t)((*((uint32_t*)(&res))) >> 16);
    (a2 + i)->x = *((uint16_t*)(&res));
  }
#endif
}

// inout += in (bf16 += bf16), accumulating in fp32 for accuracy.
inline void add_ker(at::BFloat16* inout, at::BFloat16* in, int len) {
  int i = 0;
#if defined(CPU_CAPABILITY_AVX512)
#pragma unroll(2)
  for (i = 0; i < len - 31; i += 32) {
    auto inout1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(inout + i)));
    auto inout2 =
        cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(inout + i + 16)));
    auto in1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
    auto in2 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i + 16)));
    inout1 = _mm512_add_ps(inout1, in1);
    inout2 = _mm512_add_ps(inout2, in2);
    _mm256_storeu_si256((__m256i*)(inout + i), cvt_fp32_to_bf16(inout1));
    _mm256_storeu_si256((__m256i*)(inout + i + 16), cvt_fp32_to_bf16(inout2));
  }
  if (i < len - 15) {
    // One remaining full 16-element vector.
    auto inout1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(inout + i)));
    auto in1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
    inout1 = _mm512_add_ps(inout1, in1);
    _mm256_storeu_si256((__m256i*)(inout + i), cvt_fp32_to_bf16(inout1));
    i += 16;
  }
  if (i < len) {
    // Masked tail for the final (len % 16) elements.
    auto mask = (1 << (len - i)) - 1;
    auto inout1 = cvt_bf16_to_fp32(_mm256_maskz_loadu_epi16(mask, inout + i));
    auto in1 = cvt_bf16_to_fp32(_mm256_maskz_loadu_epi16(mask, in + i));
    inout1 = _mm512_add_ps(inout1, in1);
    _mm256_mask_storeu_epi16(inout + i, mask, cvt_fp32_to_bf16(inout1));
  }
#else
  ref::add_ker(inout, in, len);
#endif
}

// inout += in (fp32 += fp32).
static inline void add_ker(float* inout, float* in, int len) {
  int i = 0;
#if defined(CPU_CAPABILITY_AVX512)
#pragma unroll(2)
  for (i = 0; i < len - 31; i += 32) {
    auto out1 = _mm512_loadu_ps(inout + i);
    auto out2 = _mm512_loadu_ps(inout + i + 16);
    auto in1 = _mm512_loadu_ps(in + i);
    auto in2 = _mm512_loadu_ps(in + i + 16);
    out1 = _mm512_add_ps(out1, in1);
    out2 = _mm512_add_ps(out2, in2);
    _mm512_storeu_ps(inout + i, out1);
    _mm512_storeu_ps(inout + i + 16, out2);
  }
  if (i < len - 15) {
    auto out1 = _mm512_loadu_ps(inout + i);
    auto in1 = _mm512_loadu_ps(in + i);
    _mm512_storeu_ps(inout + i, _mm512_add_ps(out1, in1));
    i += 16;
  }
  if (i < len) {
    auto mask = (1 << (len - i)) - 1;
    auto out1 = _mm512_maskz_loadu_ps(mask, inout + i);
    auto in1 = _mm512_maskz_loadu_ps(mask, in + i);
    _mm512_mask_storeu_ps(inout + i, mask, _mm512_add_ps(out1, in1));
  }
#else
  ref::add_ker(inout, in, len);
#endif
}

// inout += in (fp32 += bf16), widening in to fp32 on load.
static inline void add_ker(float* inout, at::BFloat16* in, int len) {
  int i = 0;
#if defined(CPU_CAPABILITY_AVX512)
#pragma unroll(2)
  for (i = 0; i < len - 31; i += 32) {
    auto in1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
    auto in2 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i + 16)));
    auto inout1 = _mm512_loadu_ps(inout + i);
    auto inout2 = _mm512_loadu_ps(inout + i + 16);
    inout1 = _mm512_add_ps(inout1, in1);
    inout2 = _mm512_add_ps(inout2, in2);
    _mm512_storeu_ps(inout + i, inout1);
    _mm512_storeu_ps(inout + i + 16, inout2);
  }
  if (i < len - 15) {
    auto in1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
    auto inout1 = _mm512_loadu_ps(inout + i);
    inout1 = _mm512_add_ps(inout1, in1);
    _mm512_storeu_ps(inout + i, inout1);
    i += 16;
  }
  if (i < len) {
    auto mask = (1 << (len - i)) - 1;
    auto in1 = cvt_bf16_to_fp32(_mm256_maskz_loadu_epi16(mask, in + i));
    auto inout1 = _mm512_maskz_loadu_ps(mask, inout + i);
    inout1 = _mm512_add_ps(inout1, in1);
    _mm512_mask_storeu_ps(inout + i, mask, inout1);
  }
#else
  ref::add_ker(inout, in, len);
#endif
}

// inout += in (double += double); left to the compiler's vectorizer.
inline void add_ker(double* inout, double* in, int len) {
#pragma omp simd
  for (int i = 0; i < len; i++) {
    *(inout + i) += *(in + i);
  }
}

// Copy fp32 -> bf16 with rounding via cvt_fp32_to_bf16.
static inline void move_ker(at::BFloat16* out, float* in, int64_t len) {
  int64_t i = 0;
#if defined(CPU_CAPABILITY_AVX512)
#pragma unroll(4)
  for (i = 0; i < len - 31; i += 32) {
    auto in0 = cvt_fp32_to_bf16(_mm512_loadu_ps(in + i));
    auto in1 = cvt_fp32_to_bf16(_mm512_loadu_ps(in + i + 16));
    _mm256_storeu_si256((__m256i*)(out + i), in0);
    _mm256_storeu_si256((__m256i*)(out + i + 16), in1);
  }
  if (i < len - 15) {
    auto in0 = cvt_fp32_to_bf16(_mm512_loadu_ps(in + i));
    _mm256_storeu_si256((__m256i*)(out + i), in0);
    i += 16;
  }
  if (i < len) {
    auto mask = ((1 << (len - i)) - 1);
    auto in0 = cvt_fp32_to_bf16(_mm512_maskz_loadu_ps(mask, in + i));
    _mm256_mask_storeu_epi16((__m256i*)(out + i), mask, in0);
  }
#else
  ref::mov_ker(out, in, len);
#endif
}

// Copy fp32 -> fp32.
static inline void move_ker(float* out, const float* in, int64_t len) {
  int64_t i = 0;
#if defined(CPU_CAPABILITY_AVX512)
#pragma unroll(4)
  for (i = 0; i < len - 15; i += 16) {
    auto in0 = _mm512_loadu_ps(in + i);
    _mm512_storeu_ps(out + i, in0);
  }
  if (i < len) {
    auto mask = ((1 << (len - i)) - 1);
    auto in0 = _mm512_maskz_loadu_ps(mask, in + i);
    _mm512_mask_storeu_ps(out + i, mask, in0);
  }
#else
  ref::mov_ker(out, in, len);
#endif
}

// Copy bf16 -> bf16 (raw 16-bit moves, no conversion).
static inline void move_ker(
    at::BFloat16* out,
    const at::BFloat16* in,
    int64_t len) {
  int64_t i = 0;
#if defined(CPU_CAPABILITY_AVX512)
#pragma unroll(4)
  for (i = 0; i < len - 31; i += 32) {
    auto in0 = _mm512_loadu_si512(in + i);
    _mm512_storeu_si512(out + i, in0);
  }
  if (i < len) {
    auto mask = (1 << (len - i)) - 1;
    auto in0 = _mm512_maskz_loadu_epi16(mask, in + i);
    _mm512_mask_storeu_epi16(out + i, mask, in0);
  }
#else
  ref::mov_ker(out, in, len);
#endif
}

// Copy int64 -> int64.
// NOTE(review): this bulk-copies 64-bit integers through double (pd) lanes;
// it is a pure bitwise move so values are preserved, but confirm intent
// before replacing with epi64 intrinsics.
static inline void move_ker(int64_t* out, int64_t* in, int64_t len) {
  int64_t i = 0;
#if defined(CPU_CAPABILITY_AVX512)
#pragma unroll(4)
  for (i = 0; i < len - 7; i += 8) {
    auto in0 = _mm512_loadu_pd(in + i);
    _mm512_storeu_pd(out + i, in0);
  }
  if (i < len) {
    auto mask = ((1 << (len - i)) - 1);
    auto in0 = _mm512_maskz_loadu_pd(mask, in + i);
    _mm512_mask_storeu_pd(out + i, mask, in0);
  }
#else
  ref::mov_ker(out, in, len);
#endif
}

// Copy int32 -> int32 (bitwise move through ps lanes, same caveat as above).
static inline void move_ker(int32_t* out, const int32_t* in, int64_t len) {
  int64_t i = 0;
#if defined(CPU_CAPABILITY_AVX512)
#pragma unroll(4)
  for (i = 0; i < len - 15; i += 16) {
    auto in0 = _mm512_loadu_ps(in + i);
    _mm512_storeu_ps(out + i, in0);
  }
  if (i < len) {
    auto mask = ((1 << (len - i)) - 1);
    auto in0 = _mm512_maskz_loadu_ps(mask, in + i);
    _mm512_mask_storeu_ps(out + i, mask, in0);
  }
#else
  ref::mov_ker(out, in, len);
#endif
}

// Copy double -> double; left to the compiler's vectorizer.
static inline void move_ker(double* out, double* in, int len) {
#pragma omp simd
  for (int i = 0; i < len; i++) {
    *(out + i) = *(in + i);
  }
}

// Zero-fill a double buffer.
static inline void zero_ker(double* out, int len) {
#pragma omp simd
  for (int i = 0; i < len; i++) {
    *(out + i) = 0;
  }
}

// Zero-fill a fp32 buffer.
static inline void zero_ker(float* out, int64_t len) {
  int64_t i = 0;
#if defined(CPU_CAPABILITY_AVX512)
  __m512 zero_512 = _mm512_setzero_ps();
#pragma unroll(4)
  for (i = 0; i < len - 15; i += 16) {
    _mm512_storeu_ps(out + i, zero_512);
  }
  if (i < len) {
    auto mask = ((1 << (len - i)) - 1);
    _mm512_mask_storeu_ps(out + i, mask, zero_512);
  }
#else
  memset(out, 0, len * sizeof(float));
#endif
}

// Zero-fill a bf16 buffer.
static inline void zero_ker(at::BFloat16* out, int64_t len) {
  int64_t i = 0;
#if defined(CPU_CAPABILITY_AVX512)
  __m512i zero_512 = _mm512_setzero_si512();
#pragma unroll(4)
  for (i = 0; i < len - 31; i += 32) {
    _mm512_storeu_si512(out + i, zero_512);
  }
  if (i < len) {
    auto mask = ((1 << (len - i)) - 1);
    _mm512_mask_storeu_epi16(out + i, mask, zero_512);
  }
#else
  memset(out, 0, len * sizeof(at::BFloat16));
#endif
}

#if defined(CPU_CAPABILITY_AVX512)
// Widen 16 bf16 values to fp32 by placing each 16-bit payload into the high
// half of a 32-bit lane.
inline __m512 convert_bf16_to_fp32(const __m256i src) {
  __m512i y = _mm512_cvtepu16_epi32(src);
  return _mm512_castsi512_ps(_mm512_bslli_epi128(y, 2));
}
#endif

// Convert any supported scalar type to float.
template <typename T>
inline float toFloat(T val) {
  float ret = float(val);
  return ret;
}

// inout[v] += in[v] * alpha, generic scalar fallback.
template <typename T1, typename T2>
inline void madd_ker(T1* inout, T2* in, int len, float alpha) {
#pragma omp simd
  for (long v = 0; v < len; v++) {
    inout[v] += toFloat(in[v]) * alpha;
  }
}

#if defined(CPU_CAPABILITY_AVX512)
// AVX-512 specialization: fp32 += bf16 * alpha via fused multiply-add.
template <>
inline void madd_ker(float* inout, at::BFloat16* in, int len, float alpha) {
  __m512 vAlpha = _mm512_set1_ps(alpha);
  int i = 0;
  for (; i < len - 15; i += 16) {
    __m512 y1 = _mm512_loadu_ps(inout + i);
    __m512 y2 = convert_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
    y1 = _mm512_fmadd_ps(vAlpha, y2, y1);
    _mm512_storeu_ps(inout + i, y1);
  }
  if (i < len) {
    // Masked tail for the final (len % 16) elements.
    int rem = len - i;
    __mmask16 mask = (1 << rem) - 1;
    __m512 y1 = _mm512_maskz_loadu_ps(mask, inout + i);
    __m512 y2 = convert_bf16_to_fp32(_mm256_maskz_loadu_epi16(mask, in + i));
    y1 = _mm512_fmadd_ps(vAlpha, y2, y1);
    _mm512_mask_storeu_ps(inout + i, mask, y1);
  }
}
#endif
GB_unop__sin_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__sin_fc32_fc32
// op(A') function: GB_unop_tran__sin_fc32_fc32

// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = csinf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = csinf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = csinf (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIN || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__sin_fc32_fc32
(
    GxB_FC32_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: apply the operator to every entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = csinf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = csinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__sin_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
requires.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 | %fcheck-aarch64-unknown-linux-gnu -allow-empty -check-prefix=DEBUG
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 | %fcheck-powerpc64-ibm-linux-gnu -allow-empty -check-prefix=DEBUG
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu -allow-empty -check-prefix=DEBUG
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-x86_64-pc-linux-gnu 2>&1 | %fcheck-x86_64-pc-linux-gnu -allow-empty -check-prefix=DEBUG
// REQUIRES: libomptarget-debug

/*
  Test for the 'requires' clause check.
  When a target region is used, the requires flags are set in the runtime
  for the entire compilation unit. If the flags are set again, (for whatever
  reason) the set must be consistent with previously set values.
*/

#include <stdio.h>
#include <omp.h>

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL

extern void __tgt_register_requires(int64_t);

// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------

void run_reg_requires() {
  // Before the target region is registered, the requires registers the status
  // of the requires clauses. Since there are no requires clauses in this file
  // the flags state can only be OMP_REQ_NONE i.e. 1.

  // This is the 2nd time this function is called so it should print the debug
  // info belonging to the check.
  __tgt_register_requires(1);
  __tgt_register_requires(1);
  // DEBUG: New requires flags 1 compatible with existing 1!
}

// ---------------------------------------------------------------------------
int main() {
  run_reg_requires(); // This also runs reg requires for the first time.

  // An (empty) target region forces device registration so the runtime
  // performs the requires-flags compatibility check exercised above.
#pragma omp target
  {}

  return 0;
}
DenseMatrix.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseMatrix.h // \brief Header file for the OpenMP-based dense matrix SMP implementation // // Copyright (C) 2013 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseMatrix.h> #include <blaze/math/expressions/SparseMatrix.h> #include <blaze/math/Functions.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/StorageOrder.h> #include <blaze/math/Submatrix.h> #include <blaze/math/traits/SubmatrixExprTrait.h> #include <blaze/math/typetraits/IsDenseMatrix.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/system/SMP.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/logging/FunctionTrace.h> #include <blaze/util/mpl/And.h> #include <blaze/util/mpl/Not.h> #include <blaze/util/mpl/Or.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> #include <blaze/util/typetraits/IsSame.h> namespace blaze { //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a row-major dense matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major dense matrix to be assigned. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a row-major // dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size }; const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).rows() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t rowsPerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a column-major dense matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major dense matrix to be assigned. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a column-major // dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size }; const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).columns() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t colsPerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a row-major sparse matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major sparse matrix to be assigned. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a row-major // sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 1UL : 0UL ); const size_t rowsPerThread( (~lhs).rows() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); assign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a column-major sparse matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major sparse matrix to be assigned. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a column-major // sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 1UL : 0UL ); const size_t colsPerThread( (~lhs).columns() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); assign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. 
// \param rhs The right-hand side matrix to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > > smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major sparse matrix to be assigned. // \return void // // This function implements the OpenMP-based SMP assignment to a dense matrix. 
Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > > smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpAssign_backend( ~lhs, ~rhs ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a row-major dense matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major dense matrix to be added. // \return void // // This function is the backend implementation of the OpenMP-based SMP addition assignment of a // row-major dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpAddAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size }; const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).rows() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t rowsPerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a column-major dense matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major dense matrix to be added. // \return void // // This function is the backend implementation of the OpenMP-based SMP addition assignment of a // column-major dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpAddAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size }; const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).columns() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t colsPerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a row-major sparse matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major sparse matrix to be added. // \return void // // This function is the backend implementation of the OpenMP-based SMP addition assignment of a // row-major sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpAddAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 1UL : 0UL ); const size_t rowsPerThread( (~lhs).rows() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); addAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a column-major sparse matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major sparse matrix to be added. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP addition assignment of a // column-major sparse matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpAddAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 1UL : 0UL ); const size_t colsPerThread( (~lhs).columns() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); addAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix. 
// \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense matrix. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > > smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major dense matrix to be added. 
// \return void // // This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > > smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpAddAssign_backend( ~lhs, ~rhs ); } } } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP subtraction assignment of a row-major dense matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side row-major dense matrix to be subtracted. // \return void // // This function is the backend implementation of the OpenMP-based SMP subtraction assignment // of a row-major dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side dense matrix void smpSubAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,rowMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,aligned> AlignedTarget; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size }; const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame<ET1,ET2>::value ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).rows() % threads ) != 0UL )? 1UL : 0UL ); const size_t equalShare ( (~lhs).rows() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t rowsPerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t row( i*rowsPerThread ); if( row >= (~lhs).rows() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, submatrix<aligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( submatrix<aligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, submatrix<aligned>( ~rhs, row, 0UL, m, 
(~lhs).columns() ) ); } else { UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) ); subAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP subtraction assignment of a column-major dense matrix // to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side column-major dense matrix to be subtracted. // \return void // // This function is the backend implementation of the OpenMP-based SMP subtraction assignment // of a column-major dense matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO        // Storage order of the left-hand side dense matrix
        , typename MT2 > // Type of the right-hand side dense matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO>& lhs, const DenseMatrix<MT2,columnMajor>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<MT1>  ET1;
   typedef ElementType_<MT2>  ET2;
   typedef SubmatrixExprTrait_<MT1,aligned>    AlignedTarget;
   typedef SubmatrixExprTrait_<MT1,unaligned>  UnalignedTarget;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<MT1> >::size };

   // Vectorization is only possible when both operands support it and share an element type.
   const bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSame<ET1,ET2>::value );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   // Column-major counterpart of the row-major backend above: the work is partitioned by
   // columns, again rounding the per-thread share up to a SIMDSIZE multiple when vectorizing.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).columns() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).columns() / threads + addon );
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t colsPerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t column( i*colsPerThread );

      // Rounding the share up can leave trailing chunks empty; skip those.
      if( column >= (~lhs).columns() )
         continue;

      const size_t n( min( colsPerThread, (~lhs).columns() - column ) );

      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) );
         subAssign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( submatrix<aligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) );
         subAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) );
         subAssign( target, submatrix<aligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) );
      }
      else {
         UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) );
         subAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a row-major sparse matrix
//        to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side row-major sparse matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a row-major sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO        // Storage order of the left-hand side dense matrix
        , typename MT2 > // Type of the right-hand side sparse matrix
void smpSubAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,rowMajor>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<MT1>  ET1;
   typedef ElementType_<MT2>  ET2;
   typedef SubmatrixExprTrait_<MT1,unaligned>  UnalignedTarget;

   // Sparse right-hand side: no SIMD kernels apply, so the rows are split evenly across the
   // threads and only unaligned submatrix views are used.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).rows() % threads ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~lhs).rows() / threads + addon );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t row( i*rowsPerThread );

      if( row >= (~lhs).rows() )
         continue;

      const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );

      UnalignedTarget target( submatrix<unaligned>( ~lhs, row, 0UL, m, (~lhs).columns() ) );
      subAssign( target, submatrix<unaligned>( ~rhs, row, 0UL, m, (~lhs).columns() ) );
   }
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a column-major sparse matrix
//        to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side column-major sparse matrix to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment
// of a column-major sparse matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO // Storage order of the left-hand side dense matrix , typename MT2 > // Type of the right-hand side sparse matrix void smpSubAssign_backend( DenseMatrix<MT1,SO>& lhs, const SparseMatrix<MT2,columnMajor>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<MT1> ET1; typedef ElementType_<MT2> ET2; typedef SubmatrixExprTrait_<MT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).columns() % threads ) != 0UL )? 1UL : 0UL ); const size_t colsPerThread( (~lhs).columns() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t column( i*colsPerThread ); if( column >= (~lhs).columns() ) continue; const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); UnalignedTarget target( submatrix<unaligned>( ~lhs, 0UL, column, (~lhs).rows(), n ) ); subAssign( target, submatrix<unaligned>( ~rhs, 0UL, column, (~lhs).rows(), n ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtracction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix. // Due to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1> , Or< Not< IsSMPAssignable<MT1> > , Not< IsSMPAssignable<MT2> > > > > smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtracction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a matrix to a // dense matrix. Due to the explicit application of the SFINAE principle, this function can only // be selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. 
Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > > smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpSubAssign_backend( ~lhs, ~rhs ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. 
Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline EnableIf_< IsDenseMatrix<MT1> > smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! \endcond */ //************************************************************************************************* } // namespace blaze #endif
GB_unop__signum_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__signum_fp32_fp32)
// op(A') function:  GB (_unop_tran__signum_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = GB_signumf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_signumf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = GB_signumf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIGNUM || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__signum_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_signumf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_signumf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__signum_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is shared via textual inclusion; it uses the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
01_lu_factorization.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>    /* only needed (and only linkable) when OpenMP is enabled */
#endif

/*
 * In-place LU factorization (Crout variant, no pivoting).
 *
 * On return the strictly-lower part plus the diagonal of `matrix` holds L,
 * and the strictly-upper part holds U (U has an implicit unit diagonal).
 *
 * Parallelization: each thread owns a contiguous band of rows
 * [min, max]. At step k the owner of row k normalizes that row; the
 * barrier then makes the pivot row visible before all threads update
 * their own rows. A single barrier per step suffices because each row is
 * only ever written by its owning thread.
 *
 * NOTE: no pivoting is performed, so a zero pivot (matrix[k][k] == 0)
 * produces inf/nan — caller must supply a matrix for which Crout
 * factorization exists.
 */
void factorize(double **matrix, int matrix_size){
    int pid, nprocs, i, j, k, row, min, max;
    #pragma omp parallel shared(matrix, matrix_size, nprocs) private(i, j, k, row, min, max, pid)
    {
        /* FIX: pid/nprocs were read uninitialized when compiled without
         * OpenMP (the assignments below were #ifdef'd out) — UB. Provide
         * the serial defaults first. */
        pid = 0;
        nprocs = 1;
#ifdef _OPENMP
        nprocs = omp_get_num_threads();
        pid = omp_get_thread_num();
#endif
        printf("nprocs: %d, pid: %d\n", nprocs, pid);

        /* Static row-band partition; the last thread absorbs the remainder.
         * If nprocs > matrix_size, row == 0 and only the last thread works —
         * correct, just not parallel. */
        row = matrix_size/nprocs;
        min = pid * row;
        max = min + row - 1;
        if (pid == nprocs - 1 && (matrix_size - (max + 1)) > 0)
            max = matrix_size - 1;

        for (k = 0; k < matrix_size; k++){
            /* Owner of row k normalizes the pivot row (Crout: U row gets
             * unit diagonal). */
            if (k >= min && k <= max){
                for (j = k + 1; j < matrix_size; j++){
                    matrix[k][j] = matrix[k][j] / matrix[k][k];
                }
            }
            /* All threads must see the finished pivot row before updating. */
            #pragma omp barrier

            /* Update the rows this thread owns that lie below row k. */
            if ((k + 1) > min)
                i = k + 1;
            else
                i = min;
            for (; i <= max; i++){
                for (j = k + 1; j < matrix_size; j++){
                    matrix[i][j] = matrix[i][j] - matrix[i][k] * matrix[k][j];
                }
            }
        }
    }
}

int main(){
    int matrix_size;
    printf("Enter matrix size: ");
    /* FIX: scanf results were unchecked — a bad read left matrix_size
     * indeterminate. */
    if (scanf("%d", &matrix_size) != 1 || matrix_size <= 0){
        fprintf(stderr, "Invalid matrix size\n");
        return EXIT_FAILURE;
    }

    double **matrix = (double**)malloc(matrix_size * sizeof(double*));
    if (matrix == NULL){
        fprintf(stderr, "Out of memory\n");
        return EXIT_FAILURE;
    }
    for (int i = 0; i < matrix_size; i++){
        matrix[i] = (double *)malloc(matrix_size * sizeof(double));
        if (matrix[i] == NULL){
            fprintf(stderr, "Out of memory\n");
            return EXIT_FAILURE;
        }
    }

    printf("Enter matrix element: ");
    for (int i = 0; i < matrix_size; i++){
        for (int j = 0; j < matrix_size; j++){
            if (scanf("%lf", &matrix[i][j]) != 1){
                fprintf(stderr, "Invalid matrix element\n");
                return EXIT_FAILURE;
            }
        }
    }

    printf("Entered matrix: \n");
    for (int i = 0; i < matrix_size; i++){
        for (int j = 0; j < matrix_size; j++){
            printf("%lf ", matrix[i][j]);
        }
        printf("\n");
    }

    int num_threads;
    printf("Enter number of threads: ");
    if (scanf("%d", &num_threads) != 1 || num_threads <= 0){
        fprintf(stderr, "Invalid thread count\n");
        return EXIT_FAILURE;
    }
    printf("\n");
    /* FIX: omp_set_num_threads was called unconditionally — link failure
     * when built without -fopenmp. */
#ifdef _OPENMP
    omp_set_num_threads(num_threads);
#endif

    printf("Starting factorization using %d threads\n", num_threads);
    factorize(matrix, matrix_size);

    printf("Resultant matrix: \n");
    printf("Matrix L: \n");
    for (int i = 0; i < matrix_size; i++){
        for (int j = 0; j < matrix_size; j++){
            if (j > i)
                printf("%lf ", 0.0);
            else
                printf("%lf ", matrix[i][j]);
        }
        printf("\n");
    }
    printf("Matrix R: \n");
    for (int i = 0; i < matrix_size; i++){
        for (int j = 0; j < matrix_size; j++){
            if (i == j)
                printf("%lf ", 1.0);
            else if (i > j)
                printf("%lf ", 0.0);
            else
                printf("%lf ", matrix[i][j]);
        }
        printf("\n");
    }

    /* FIX: the matrix was never freed. */
    for (int i = 0; i < matrix_size; i++)
        free(matrix[i]);
    free(matrix);
    return 0;
}

/**
Sample input for verification:
int size = 3
matrix:
2.000000 2.000000 3.000000
5.000000 9.000000 10.000000
4.000000 1.000000 2.000000

expected output:
Matrix L:
2.000000 0.000000 0.000000
5.000000 4.000000 0.000000
4.000000 -3.000000 -2.125000
Matrix R:
1.000000 1.000000 1.500000
0.000000 1.000000 0.625000
0.000000 0.000000 1.000000
**/
labyrinth.c
/* ============================================================================= * * labyrinth.c * * ============================================================================= * * Copyright (C) Stanford University, 2006. All Rights Reserved. * Author: Chi Cao Minh * * ============================================================================= * * For the license of bayes/sort.h and bayes/sort.c, please see the header * of the files. * * ------------------------------------------------------------------------ * * For the license of kmeans, please see kmeans/LICENSE.kmeans * * ------------------------------------------------------------------------ * * For the license of ssca2, please see ssca2/COPYRIGHT * * ------------------------------------------------------------------------ * * For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the * header of the files. * * ------------------------------------------------------------------------ * * For the license of lib/rbtree.h and lib/rbtree.c, please see * lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree * * ------------------------------------------------------------------------ * * Unless otherwise noted, the following license applies to STAMP files: * * Copyright (c) 2007, Stanford University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Stanford University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * ============================================================================= */ #include <assert.h> #include <getopt.h> #include <stdio.h> #include <stdlib.h> #include "list.h" #include "maze.h" #include "router.h" #include "thread.h" #include "timer.h" #include "types.h" #include "../lib/instrument_roi.h" enum param_types { PARAM_BENDCOST = (unsigned char)'b', PARAM_THREAD = (unsigned char)'t', PARAM_XCOST = (unsigned char)'x', PARAM_YCOST = (unsigned char)'y', PARAM_ZCOST = (unsigned char)'z', }; enum param_defaults { PARAM_DEFAULT_BENDCOST = 1, PARAM_DEFAULT_THREAD = 1, PARAM_DEFAULT_XCOST = 1, PARAM_DEFAULT_YCOST = 1, PARAM_DEFAULT_ZCOST = 2, }; bool_t global_doPrint = FALSE; char* global_inputFile = NULL; long global_params[256]; /* 256 = ascii limit */ /* ============================================================================= * displayUsage * ============================================================================= */ static void displayUsage (const char* appName) { printf("Usage: %s [options]\n", appName); puts("\nOptions: (defaults)\n"); printf(" b <INT> [b]end cost (%i)\n", PARAM_DEFAULT_BENDCOST); printf(" i <FILE> [i]nput file name (%s)\n", global_inputFile); printf(" p [p]rint routed maze (false)\n"); 
printf(" t <UINT> Number of [t]hreads (%i)\n", PARAM_DEFAULT_THREAD); printf(" x <UINT> [x] movement cost (%i)\n", PARAM_DEFAULT_XCOST); printf(" y <UINT> [y] movement cost (%i)\n", PARAM_DEFAULT_YCOST); printf(" z <UINT> [z] movement cost (%i)\n", PARAM_DEFAULT_ZCOST); exit(1); } /* ============================================================================= * setDefaultParams * ============================================================================= */ static void setDefaultParams () { global_params[PARAM_BENDCOST] = PARAM_DEFAULT_BENDCOST; global_params[PARAM_THREAD] = PARAM_DEFAULT_THREAD; global_params[PARAM_XCOST] = PARAM_DEFAULT_XCOST; global_params[PARAM_YCOST] = PARAM_DEFAULT_YCOST; global_params[PARAM_ZCOST] = PARAM_DEFAULT_ZCOST; } /* ============================================================================= * parseArgs * ============================================================================= */ static void parseArgs (long argc, char* const argv[]) { long i; long opt; opterr = 0; setDefaultParams(); while ((opt = getopt(argc, argv, "b:i:pt:x:y:z:")) != -1) { switch (opt) { case 'b': case 't': case 'x': case 'y': case 'z': global_params[(unsigned char)opt] = atol(optarg); break; case 'i': global_inputFile = optarg; break; case 'p': global_doPrint = TRUE; break; case '?': default: opterr++; break; } } for (i = optind; i < argc; i++) { fprintf(stderr, "Non-option argument: %s\n", argv[i]); opterr++; } if (opterr) { displayUsage(argv[0]); } } /* ============================================================================= * main * ============================================================================= */ MAIN(argc, argv) { /* * Initialization */ parseArgs(argc, (char** const)argv); SIM_GET_NUM_CPU(numThread); long numThread = global_params[PARAM_THREAD]; TM_STARTUP(numThread); P_MEMORY_STARTUP(numThread); maze_t* mazePtr; long numPathToRoute; router_t* routerPtr; list_t* pathVectorListPtr; router_solve_arg_t routerArg; long 
numPathRouted; TM_THREAD_ENTER(); TM_BEGIN(); mazePtr = maze_alloc(); assert(mazePtr); numPathToRoute = maze_read(mazePtr, global_inputFile); routerPtr = router_alloc(global_params[PARAM_XCOST], global_params[PARAM_YCOST], global_params[PARAM_ZCOST], global_params[PARAM_BENDCOST]); assert(routerPtr); pathVectorListPtr = list_alloc(NULL); assert(pathVectorListPtr); /* * Run transactions */ routerArg = {routerPtr, mazePtr, pathVectorListPtr}; TM_END(); thread_startup(numThread); // NB: Since ASF/PTLSim "REAL" is native execution, and since we are using // wallclock time, we want to be sure we read time inside the // simulator, or else we report native cycles spent on the benchmark // instead of simulator cycles. //GOTO_SIM(); TIMER_T startTime, stopTime; //TIMER_READ(startTime); BEGIN_ROI; #ifdef OTM #pragma omp parallel { router_solve((void *)&routerArg); } #else thread_start(router_solve, (void*)&routerArg); #endif END_ROI; //TIMER_T stopTime; //TIMER_READ(stopTime); // NB: As above, timer reads must be done inside of the simulated region // for PTLSim/ASF //GOTO_REAL(); TM_BEGIN(); numPathRouted = 0; list_iter_t it; list_iter_reset(&it, pathVectorListPtr); while (list_iter_hasNext(&it, pathVectorListPtr)) { vector_t* pathVectorPtr = (vector_t*)list_iter_next(&it, pathVectorListPtr); numPathRouted += vector_getSize(pathVectorPtr); } printf("Paths routed = %li\n", numPathRouted); printf("Elapsed time = %f seconds\n", TIMER_DIFF_SECONDS(startTime, stopTime)); /* * Check solution and clean up */ assert(numPathRouted <= numPathToRoute); bool_t status = maze_checkPaths(mazePtr, pathVectorListPtr, global_doPrint); //assert(status == TRUE); puts("Verification passed."); maze_free(mazePtr); router_free(routerPtr); TM_END(); TM_SHUTDOWN(); P_MEMORY_SHUTDOWN(); thread_shutdown(); MAIN_RETURN(0); } /* ============================================================================= * * End of labyrinth.c * * 
============================================================================= */
update_ops_named_X.c
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

// Pauli-X gate on the given target qubit of a state vector of dimension `dim`.
// Dispatches to a SIMD and/or OpenMP variant depending on build flags; below
// the 2^13-amplitude threshold the single-threaded kernels win (thread startup
// overhead dominates).
void X_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
#ifdef _USE_SIMD
#ifdef _OPENMP
	UINT threshold = 13;
	if (dim < (((ITYPE)1) << threshold)) {
		X_gate_single_simd(target_qubit_index, state, dim);
	}
	else {
		X_gate_parallel_simd(target_qubit_index, state, dim);
	}
#else
	X_gate_single_simd(target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
	UINT threshold = 13;
	if (dim < (((ITYPE)1) << threshold)) {
		X_gate_single_unroll(target_qubit_index, state, dim);
	}
	else {
		X_gate_parallel_unroll(target_qubit_index, state, dim);
	}
#else
	X_gate_single_unroll(target_qubit_index, state, dim);
#endif
#endif
}

// Scalar kernel, two index pairs per iteration.
// For target qubit q > 0, basis_index_0 re-inserts a 0 bit at position q into
// state_index ((i & low) | ((i & high) << 1)) and basis_index_1 = basis_index_0
// + mask is its partner with that bit set; X swaps the pair of amplitudes.
// Because bit 0 is then not part of the mask, consecutive state_index values
// map to consecutive basis indices, allowing the stride-2 unroll.
void X_gate_single_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
	if (target_qubit_index == 0) {
		// q == 0: partners are adjacent; swap (2k, 2k+1) directly.
		ITYPE basis_index = 0;
		for (basis_index = 0; basis_index < dim; basis_index += 2) {
			CTYPE temp = state[basis_index];
			state[basis_index] = state[basis_index + 1];
			state[basis_index + 1] = temp;
		}
	}
	else {
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_index_1 = basis_index_0 + mask;
			// swap two adjacent pairs per iteration
			CTYPE temp0 = state[basis_index_0];
			CTYPE temp1 = state[basis_index_0+1];
			state[basis_index_0] = state[basis_index_1];
			state[basis_index_0+1] = state[basis_index_1+1];
			state[basis_index_1] = temp0;
			state[basis_index_1+1] = temp1;
		}
	}
}

#ifdef _OPENMP
// OpenMP version of X_gate_single_unroll; iterations touch disjoint index
// pairs, so the parallel-for needs no synchronization.
void X_gate_parallel_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
	if (target_qubit_index == 0) {
		ITYPE basis_index = 0;
#pragma omp parallel for
		for (basis_index = 0; basis_index < dim; basis_index += 2) {
			CTYPE temp = state[basis_index];
			state[basis_index] = state[basis_index + 1];
			state[basis_index + 1] = temp;
		}
	}
	else {
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_index_1 = basis_index_0 + mask;
			CTYPE temp0 = state[basis_index_0];
			CTYPE temp1 = state[basis_index_0 + 1];
			state[basis_index_0] = state[basis_index_1];
			state[basis_index_0 + 1] = state[basis_index_1 + 1];
			state[basis_index_1] = temp0;
			state[basis_index_1 + 1] = temp1;
		}
	}
}
#endif

#ifdef _USE_SIMD
// AVX2 version: one 256-bit register holds two complex doubles.
void X_gate_single_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
	if (target_qubit_index == 0) {
		// q == 0: swap the two complex values inside one register.
		ITYPE basis_index = 0;
		for (basis_index = 0; basis_index < dim; basis_index += 2) {
			double* ptr = (double*)(state + basis_index);
			__m256d data = _mm256_loadu_pd(ptr);
			data = _mm256_permute4x64_pd(data, 78); // (3210) -> (1032) : 1*2 + 4*3 + 16*0 + 64*1 = 2+12+64=78
			_mm256_storeu_pd(ptr, data);
		}
	}
	else {
		// q > 0: exchange whole 2-amplitude registers between partner slots.
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_index_1 = basis_index_0 + mask;
			double* ptr0 = (double*)(state + basis_index_0);
			double* ptr1 = (double*)(state + basis_index_1);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			__m256d data1 = _mm256_loadu_pd(ptr1);
			_mm256_storeu_pd(ptr1, data0);
			_mm256_storeu_pd(ptr0, data1);
		}
	}
}

#ifdef _OPENMP
// OpenMP + AVX2 version of X_gate_single_simd; same disjoint-pair argument
// as above makes the parallel-for race-free.
void X_gate_parallel_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
	if (target_qubit_index == 0) {
		ITYPE basis_index = 0;
#pragma omp parallel for
		for (basis_index = 0; basis_index < dim; basis_index += 2) {
			double* ptr = (double*)(state + basis_index);
			__m256d data = _mm256_loadu_pd(ptr);
			data = _mm256_permute4x64_pd(data, 78); // (3210) -> (1032) : 1*2 + 4*3 + 16*0 + 64*1 = 2+12+64=78
			_mm256_storeu_pd(ptr, data);
		}
	}
	else {
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_index_1 = basis_index_0 + mask;
			double* ptr0 = (double*)(state + basis_index_0);
			double* ptr1 = (double*)(state + basis_index_1);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			__m256d data1 = _mm256_loadu_pd(ptr1);
			_mm256_storeu_pd(ptr1, data0);
			_mm256_storeu_pd(ptr0, data1);
		}
	}
}
#endif
#endif

// NOTE(review): a large block of commented-out legacy implementations
// (X_gate_old / X_gate_single / X_gate_parallel) was removed here; recover
// it from version-control history if ever needed.
blas1_dispatch_vector.h
#ifndef _DG_BLAS_STD_VECTOR_
#define _DG_BLAS_STD_VECTOR_
#ifdef DG_DEBUG
#include <cassert>
#endif //DG_DEBUG
#include <vector>
#include <array>
#include "blas1_dispatch_shared.h"
#include "vector_categories.h"
#include "tensor_traits.h"
#ifdef _OPENMP
#include <omp.h>
#endif //_OPENMP
///@cond
namespace dg
{
// forward declarations; the public entry points are defined elsewhere
template<class to_ContainerType, class from_ContainerType, class ...Params>
inline to_ContainerType construct( const from_ContainerType& src, Params&& ...ps);
template<class from_ContainerType, class to_ContainerType, class ...Params>
inline void assign( const from_ContainerType&, to_ContainerType&, Params&& ...ps);
namespace detail{

// Tag-dispatched construct overloads. The second tag is the category of the
// source `src`; the first is the category of the target `To`.

// array of containers from a shared vector: every element built from `src`
template<class To, class From, class ...Params>
To doConstruct( const From& src, ArrayVectorTag, SharedVectorTag, Params&&...ps )
{
    To t;
    using inner_vector = typename To::value_type;
    for (unsigned i=0; i<t.size(); i++)
        t[i] = dg::construct<inner_vector>(src, std::forward<Params>(ps)...);
    return t;
}
// array of containers from an MPI vector: every element built from `src`
template<class To, class From, class ...Params>
To doConstruct( const From& src, ArrayVectorTag, MPIVectorTag, Params&&...ps )
{
    To t;
    using inner_vector = typename To::value_type;
    for (unsigned i=0; i<t.size(); i++)
        t[i] = dg::construct<inner_vector>(src, std::forward<Params>(ps)...);
    return t;
}
// array of containers from a recursive vector: element-wise construction
template<class To, class From, class ...Params>
To doConstruct( const From& src, ArrayVectorTag, RecursiveVectorTag, Params&&...ps )
{
    To t;
    using inner_vector = typename To::value_type;
    for (unsigned i=0; i<t.size(); i++)
        t[i] = dg::construct<inner_vector>(src[i], std::forward<Params>(ps)...);
    return t;
}
// recursive vector of `size` containers, each built from the shared `src`
template<class To, class From, class Size, class ...Params>
To doConstruct( const From& src, RecursiveVectorTag, SharedVectorTag, Size size, Params&&... ps )
{
    To t(size);
    using inner_vector = typename To::value_type;
    for (int i=0; i<(int)size; i++)
        t[i] = dg::construct<inner_vector>(src, std::forward<Params>(ps)...);
    return t;
}
// recursive vector of `size` containers, each built from the MPI `src`
template<class To, class From, class Size, class ...Params>
To doConstruct( const From& src, RecursiveVectorTag, MPIVectorTag, Size size, Params&&... ps )
{
    To t(size);
    using inner_vector = typename To::value_type;
    for (int i=0; i<(int)size; i++)
        t[i] = dg::construct<inner_vector>(src, std::forward<Params>(ps)...);
    return t;
}
// recursive vector from a recursive vector: element-wise construction
template<class To, class From, class ...Params>
To doConstruct( const From& src, RecursiveVectorTag, RecursiveVectorTag, Params&&...ps )
{
    unsigned size = src.size();
    To t(size);
    using inner_vector = typename To::value_type;
    for (unsigned i=0; i<size; i++)
        t[i] = dg::construct<inner_vector>(src[i], std::forward<Params>(ps)...);
    return t;
}

// Tag-dispatched assign overloads, mirroring the doConstruct variants above.

// shared source into every element of an array of containers
template<class From, class To, class ...Params>
void doAssign( const From& src, To& to, SharedVectorTag, ArrayVectorTag, Params&&...ps )
{
    for (unsigned i=0; i<to.size(); i++)
        dg::assign(src, to[i], std::forward<Params>(ps)...);
}
// MPI source into every element of an array of containers
template<class From, class To, class ...Params>
void doAssign( const From& src, To& to, MPIVectorTag, ArrayVectorTag, Params&&...ps )
{
    for (unsigned i=0; i<to.size(); i++)
        dg::assign(src, to[i], std::forward<Params>(ps)...);
}
// recursive source into an array of containers: element-wise
template<class From, class To, class ...Params>
void doAssign( const From& src, To& to, RecursiveVectorTag, ArrayVectorTag, Params&&...ps )
{
    for (unsigned i=0; i<to.size(); i++)
        dg::assign(src[i], to[i], std::forward<Params>(ps)...);
}
// shared source into a recursive vector resized to `size`
template<class From, class To, class Size, class ...Params>
void doAssign( const From& src, To& to, SharedVectorTag, RecursiveVectorTag, Size size, Params&&... ps )
{
    to.resize(size);
    for (int i=0; i<(int)size; i++)
        dg::assign(src, to[i], std::forward<Params>(ps)...);
}
// MPI source into a recursive vector resized to `size`
template<class From, class To, class Size, class ...Params>
void doAssign( const From& src, To& to, MPIVectorTag, RecursiveVectorTag, Size size, Params&&... ps )
{
    to.resize(size);
    for (int i=0; i<(int)size; i++)
        dg::assign(src, to[i], std::forward<Params>(ps)...);
}
// recursive source into a recursive vector: element-wise after resize
template<class From, class To, class ...Params>
void doAssign( const From& src, To& to, RecursiveVectorTag, RecursiveVectorTag, Params&&...ps )
{
    unsigned size = src.size();
    to.resize(size);
    for (unsigned i=0; i<size; i++)
        dg::assign(src[i], to[i], std::forward<Params>(ps)...);
}

} //namespace detail

namespace blas1
{
namespace detail
{

// Dot product of two recursive vectors, accumulated exactly in an exblas
// superaccumulator (a vector of BIN_COUNT int64 bins). The bins are
// renormalized periodically to avoid overflow of individual bins.
template< class Vector1, class Vector2>
inline std::vector<int64_t> doDot_superacc( const Vector1& x1, const Vector2& x2, RecursiveVectorTag)
{
    //find out which one is the RecursiveVector and determine size
    constexpr unsigned vector_idx = find_if_v<dg::is_not_scalar, Vector1, Vector1, Vector2>::value;
    auto size = get_idx<vector_idx>(x1,x2).size();
    std::vector<int64_t> acc( exblas::BIN_COUNT, (int64_t)0);
    for( unsigned i=0; i<size; i++)
    {
        // recursively reduce the i-th elements into a temporary superacc
        std::vector<int64_t> temp = doDot_superacc( do_get_vector_element(x1,i,get_tensor_category<Vector1>()), do_get_vector_element(x2,i,get_tensor_category<Vector2>()));
        int imin = exblas::IMIN, imax = exblas::IMAX;
        exblas::cpu::Normalize( &(temp[0]), imin, imax);
        for( int k=exblas::IMIN; k<=exblas::IMAX; k++)
            acc[k] += temp[k];
        // renormalize the running accumulator every 128 additions
        if( (i+1)%128 == 0)
        {
            imin = exblas::IMIN, imax = exblas::IMAX;
            exblas::cpu::Normalize( &(acc[0]), imin, imax);
        }
    }
    return acc;
}
/////////////////////////////////////////////////////////////////////////////////////
#ifdef _OPENMP
//omp tag implementation
template< class size_type, class Subroutine, class container, class ...Containers>
inline void doSubroutine_dispatch( RecursiveVectorTag, OmpTag, size_type size, Subroutine f, container&& x, Containers&&... xs)
{
    //using inner_container = typename std::decay<container>::type::value_type;
    if( !omp_in_parallel())//to catch recursive calls
    {
        #pragma omp parallel
        {
            for( int i=0; i<(int)size; i++) {//omp sometimes has problems if loop variable is not int
                dg::blas1::subroutine( f, do_get_vector_element(std::forward<container>(x),i,get_tensor_category<container>()), do_get_vector_element(std::forward<Containers>(xs),i,get_tensor_category<Containers>())...);
            }
        }
    }
    else //we are already in a parallel omp region
        for( int i=0; i<(int)size; i++) {
            dg::blas1::subroutine( f, do_get_vector_element(std::forward<container>(x),i,get_tensor_category<container>()), do_get_vector_element(std::forward<Containers>(xs),i,get_tensor_category<Containers>())...);
        }
}
#endif //_OPENMP

//any tag implementation (recursively call dg::blas1::subroutine)
template<class size_type, class Subroutine, class container, class ...Containers>
inline void doSubroutine_dispatch( RecursiveVectorTag, AnyPolicyTag, size_type size, Subroutine f, container&& x, Containers&&... xs)
{
    for( int i=0; i<(int)size; i++) {
        dg::blas1::subroutine( f, do_get_vector_element(std::forward<container>(x),i,get_tensor_category<container>()), do_get_vector_element(std::forward<Containers>(xs),i,get_tensor_category<Containers>())...);
    }
}

//dispatch: determine the loop size and the execution policy, then forward
template< class Subroutine, class container, class ...Containers>
inline void doSubroutine( RecursiveVectorTag, Subroutine f, container&& x, Containers&&... xs)
{
    constexpr unsigned vector_idx = find_if_v<dg::is_not_scalar, get_value_type<container>, container, Containers...>::value;
    auto size = get_idx<vector_idx>( std::forward<container>(x), std::forward<Containers>(xs)...).size();
    using vector_type = find_if_t<dg::has_not_any_policy, get_value_type<container>, container, Containers...>;
    doSubroutine_dispatch( RecursiveVectorTag(), get_execution_policy<vector_type>(), size, f, std::forward<container>( x), std::forward<Containers>( xs)...);
}

// Fold `op` over all elements of a recursive vector, starting from `init`.
// NOTE(review): `init` is passed both to the inner reduce and the outer `op`;
// verify against dg::blas1::reduce's contract that this is intended.
template<class T, class ContainerType, class BinaryOp>
inline T doReduce( RecursiveVectorTag, const ContainerType& x, T init, BinaryOp op)
{
    //reduce sequentially recursively
    for ( unsigned u=0; u<x.size(); u++)
    {
        init = op( init, dg::blas1::reduce( x[u], init, op));
    }
    return init;
}

} //namespace detail
} //namespace blas1
} //namespace dg
///@endcond

#endif //_DG_BLAS_STD_VECTOR_
neuron.h
/*
 * Computer System Architectures (AVS 2019)
 * Project no. 1 (ANN)
 * Login: xharmi00
 */

#ifndef NEURON_H
#define NEURON_H

#include <cstdlib>

/**
 * @brief Returns output of the neuron as product of inputs, sums and bias.
 *
 * @param inputSize - number of inputs of the neuron
 * @param input     - pointer to neuron input array (identical for all
 *                    neurons in the layer)
 * @param weights   - pointer to weights for the neuron
 * @param bias      - bias value of the neuron
 * @return Output of the neuron.
 */
// NOTE(review): the declare-simd pragmas are intentionally disabled; the
// `linear(weights:784)` vs `linear(weights:512)` stride depends on the layer's
// input size — confirm the right stride before re-enabling one of them.
// #pragma omp declare simd uniform(inputSize, input) linear(weights:784)
// #pragma omp declare simd uniform(inputSize, input) linear(weights:512)
float evalNeuron(
	size_t inputSize,
	const float *input,
	const float *weights,
	float bias
);

#endif
GB_unop__tanh_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__tanh_fc64_fc64
// op(A') function: GB_unop_tran__tanh_fc64_fc64

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = ctanh (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ctanh (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = ctanh (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TANH || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = ctanh(x) to every entry of Ax, writing the result into Cx.
// If Ab is non-NULL, A is held in bitmap form and only entries with Ab[p]
// set are processed. Parallelized over `nthreads` OpenMP threads.
GrB_Info GB_unop_apply__tanh_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: apply the operator to every entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ctanh (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ctanh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel is generated from the shared template
// GB_unop_transpose.c, which uses the GB_* macros defined above.
GrB_Info GB_unop_tran__tanh_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
tinyexr.h
/* Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0)  // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif

// Error codes returned by the TinyEXR API (negative on failure).
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel
// Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x hight // Returns negative value and may set error string in `err` when there's an // error // When the specified layer name is not found in the EXR file, the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`. extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layer_name, const char **err); // // Get layer infos from EXR file. // // @param[out] layer_names List of layer names. Application must free memory after using this. // @param[out] num_layers The number of layers // @param[out] err Error string(wll be filled when the function returns error code). Free it using FreeEXRErrorMessage after using this value. // // @return TINYEXR_SUCCEES upon success. // extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err); // @deprecated { to be removed. } // Simple wrapper API for ParseEXRHeaderFromFile. // checking given file is a EXR file(by just look up header) // @return TINYEXR_SUCCEES for EXR image, TINYEXR_ERROR_INVALID_HEADER for // others extern int IsEXR(const char *filename); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // hight` // Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero // value. // Save image as fp32(FLOAT) format when `save_as_fp16` is 0. // Use ZIP compression by default. 
// Returns negative value and may set error string in `err` when there's an // error extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename, const char **err); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Free's internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Free's internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Free's error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. 
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. 
// Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEIFNED #define TINYEXR_IMPLEMENTATION_DEIFNED #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> // #include <iostream> // debug #include <limits> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #if TINYEXR_USE_THREAD #include <atomic> #include <thread> #endif #endif // __cplusplus > 199711L #if TINYEXR_USE_OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else // Issue #46. 
Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #if __has_warning("-Wtautological-constant-compare") #pragma clang diagnostic ignored "-Wtautological-constant-compare" #endif #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. 
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. 
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. 
- Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). 
It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. 
Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. 
Note the appending is done in-place and is not an atomic operation, so if
   something goes wrong during the operation it's possible the archive could
   be left without a central directory (although the local file headers and
   file data will be fine, so the archive will be recoverable).

   For more complex archive modification scenarios:
   1. The safest way is to use a mz_zip_reader to read the existing archive,
   cloning only those bits you want to preserve into a new archive using the
   mz_zip_writer_add_from_zip_reader() function (which copies the compressed
   file data as-is). When you're done, delete the old archive and rename the
   newly written archive, and you're done. This is safe but requires a bunch
   of temporary disk space or heap memory.

   2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
   mz_zip_writer_init_from_reader(), append new files as needed, then
   finalize the archive which will write an updated central directory to the
   original archive. (This is basically what
   mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility
   that the archive's central directory could be lost with this method if
   anything goes wrong, though.

   - ZIP archive support limitations:
   No zip64 or spanning support. Extraction functions can only handle
   unencrypted, stored or deflated files. Requires streams capable of
   seeking.

   * This is a header file library, like stb_image.c. To get only a header
   file, either cut and paste the below header, or create miniz.h, #define
   MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it.

   * Important: For best perf. be sure to customize the below macros for your
   target platform:
   #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
   #define MINIZ_LITTLE_ENDIAN 1
   #define MINIZ_HAS_64BIT_REGISTERS 1

   * On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1"
   before including miniz.c to ensure miniz uses the 64-bit variants:
   fopen64(), stat64(), etc. Otherwise you won't be able to process large
   files (i.e.
32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED

//#include <stdlib.h>

// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.

// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO

// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME

// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS

// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS

// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS

// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES

// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. 
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};

// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. 
int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. 
int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). 
// MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION 
MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, 
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size); // Attempts to locates a file in the archive's central directory. // Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH // Returns -1 if the file cannot be found. int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); // Extracts a archive file to a memory buffer using no memory allocation. mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); // Extracts a archive file to a memory buffer. mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags); // Extracts a archive file to a dynamically allocated heap buffer. void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags); void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags); // Extracts a archive file using a callback function to output the file's data. mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); #ifndef MINIZ_NO_STDIO // Extracts a archive file to a disk file and sets its last accessed and // modified times. 
// This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip); // ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. // For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. 
These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. 
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). 
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. 
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
  TDEFL_WRITE_ZLIB_HEADER = 0x01000,
  TDEFL_COMPUTE_ADLER32 = 0x02000,
  TDEFL_GREEDY_PARSING_FLAG = 0x04000,
  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
  TDEFL_RLE_MATCHES = 0x10000,
  TDEFL_FILTER_MATCHES = 0x20000,
  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};

// High level compression functions:

// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
//   pSrc_buf, src_buf_len: Pointer and size of source block to compress.
//   flags: The max match finder probes (default is 128) logically OR'd against
//   the above flags. Higher probes are slower but improve compression.
// On return:
//   Function returns a pointer to the compressed data, or NULL on failure.
//   *pOut_len will be set to the compressed data's size, which could be larger
//   than src_buf_len on uncompressible data.
//   The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. 
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags);

enum {
  TDEFL_MAX_HUFF_TABLES = 3,
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
  TDEFL_LZ_DICT_SIZE = 32768,
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,
  TDEFL_MAX_MATCH_LEN = 258
};

// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif

// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,
  TDEFL_STATUS_PUT_BUF_FAILED = -1,
  TDEFL_STATUS_OKAY = 0,
  TDEFL_STATUS_DONE = 1
} tdefl_status;

// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,
  TDEFL_SYNC_FLUSH = 2,
  TDEFL_FULL_FLUSH = 3,
  TDEFL_FINISH = 4
} tdefl_flush;

// tdefl's compression state structure.
typedef struct { tdefl_put_buf_func_ptr m_pPut_buf_func; void *m_pPut_buf_user; mz_uint m_flags, m_max_probes[2]; int m_greedy_parsing; mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size; mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end; mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer; mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish; tdefl_status m_prev_return_status; const void *m_pIn_buf; void *m_pOut_buf; size_t *m_pIn_buf_size, *m_pOut_buf_size; tdefl_flush m_flush; const mz_uint8 *m_pSrc; size_t m_src_buf_left, m_out_buf_ofs; mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1]; mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE]; mz_uint16 m_next[TDEFL_LZ_DICT_SIZE]; mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE]; mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE]; } tdefl_compressor; // Initializes the compressor. // There is no corresponding deinit() function because the tdefl API's do not // dynamically allocate memory. // pBut_buf_func: If NULL, output data will be supplied to the specified // callback. In this case, the user should call the tdefl_compress_buffer() API // for compression. // If pBut_buf_func is NULL the user should always call the tdefl_compress() // API. // flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, // etc.) tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); // Compresses a block of data, consuming as much of the specified input buffer // as possible, and writing as much compressed data to the specified output // buffer as possible. 
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush); // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a // non-NULL tdefl_put_buf_func_ptr. // tdefl_compress_buffer() always consumes the entire input buffer. tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); // Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't // defined, because it uses some of its macros. #ifndef MINIZ_NO_ZLIB_APIS // Create tdefl_compress() flags given zlib-style compression parameters. // level may range from [0,10] (where 10 is absolute max compression, but may be // much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, // MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #endif // #ifndef MINIZ_NO_ZLIB_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want // the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? 
(a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
// See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/

// Computes a zlib-compatible CRC-32 over buf_len bytes at ptr, continuing
// from the running value 'crc'. Uses a 16-entry nibble table (two lookups
// per byte) to keep the table small and cache-friendly. Passing ptr == NULL
// returns the initial CRC value instead of processing data.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
      0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 crcu32 = (mz_uint32)crc;
  if (!ptr) return MZ_CRC32_INIT;
  crcu32 = ~crcu32;
  while (buf_len--) {
    mz_uint8 b = *ptr++;
    // Low nibble first, then high nibble.
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
  }
  return ~crcu32;
}

// Frees a block previously allocated through miniz's allocator macros.
void mz_free(void *p) { MZ_FREE(p); }

#ifndef MINIZ_NO_ZLIB_APIS

// Default zalloc callback used when the caller leaves stream->zalloc NULL.
// NOTE(review): items * size is not checked for overflow here — confirm
// callers keep these values bounded.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  return MZ_MALLOC(items * size);
}
// Default zfree callback used when the caller leaves stream->zfree NULL.
static void def_free_func(void *opaque, void *address) {
  (void)opaque, (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
//                               size_t size) {
//  (void)opaque, (void)address, (void)items, (void)size;
//  return MZ_REALLOC(address, items * size);
//}

// Returns the miniz version string (zlib-style API).
const char *mz_version(void) { return MZ_VERSION; }

// zlib-compatible deflateInit(): initializes pStream for compression at
// 'level' with all other parameters at their defaults.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS,
                         9, MZ_DEFAULT_STRATEGY);
}

// zlib-compatible deflateInit2(): validates the parameters, resets the
// stream bookkeeping, installs default allocators if needed, and allocates
// and initializes the internal tdefl compressor.
// Returns MZ_OK, MZ_STREAM_ERROR, MZ_PARAM_ERROR, or MZ_MEM_ERROR.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);

  if (!pStream) return MZ_STREAM_ERROR;
  // Only the deflate method and the default window size (raw or zlib-
  // wrapped, i.e. +/-MZ_DEFAULT_WINDOW_BITS) are supported.
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;

  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;

  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;

  pStream->state = (struct mz_internal_state *)pComp;

  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }

  return MZ_OK;
}

// zlib-compatible deflateReset(): rewinds the totals and re-initializes the
// compressor in place, preserving the flags it was created with.
int mz_deflateReset(mz_streamp pStream) {
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = pStream->total_out = 0;
  tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
             ((tdefl_compressor *)pStream->state)->m_flags);
  return MZ_OK;
}

// zlib-compatible deflate(): compresses as much of next_in as fits in
// next_out, updating the avail/total counters and the running adler.
// Returns MZ_OK, MZ_STREAM_END (all data flushed after MZ_FINISH),
// MZ_BUF_ERROR (no forward progress possible), or MZ_STREAM_ERROR.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;

  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;

  // Partial flush is treated as a sync flush.
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;

  // Once the compressor reports DONE, only MZ_FINISH may be repeated.
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;

  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;

    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);

    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;

    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}

// zlib-compatible deflateEnd(): releases the compressor state.
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

// zlib-compatible deflateBound(): worst-case compressed size for
// source_len input bytes.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  (void)pStream;
  // This is really over conservative. (And lame, but it's actually pretty
  // tricky to compute a true upper bound given the way tdefl's blocking
  // works.)
  return MZ_MAX(128 + (source_len * 110) / 100,
                128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}

// zlib-compatible compress2(): one-shot compression of pSource into pDest.
// On entry *pDest_len is the output capacity; on success it receives the
// compressed size.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  int status;
  mz_stream stream;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_deflateInit(&stream, level);
  if (status != MZ_OK) return status;

  status = mz_deflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_deflateEnd(&stream);
    // MZ_OK here means the output buffer was too small.
    return (status == MZ_OK) ? MZ_BUF_ERROR : status;
  }

  *pDest_len = stream.total_out;
  return mz_deflateEnd(&stream);
}

// zlib-compatible compress(): compress2() at the default level.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}

// zlib-compatible compressBound().
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}

// Per-stream inflate state: the low-level tinfl decompressor plus a 32KB
// circular dictionary used to buffer output between mz_inflate() calls.
typedef struct {
  tinfl_decompressor m_decomp;
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;

// zlib-compatible inflateInit2(): positive window_bits expects a zlib
// header/trailer, negative means a raw deflate stream. Only the default
// window size is supported.
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pDecomp;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;

  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;

  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                             sizeof(inflate_state));
  if (!pDecomp) return MZ_MEM_ERROR;

  pStream->state = (struct mz_internal_state *)pDecomp;

  tinfl_init(&pDecomp->m_decomp);
  pDecomp->m_dict_ofs = 0;
  pDecomp->m_dict_avail = 0;
  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pDecomp->m_first_call = 1;
  pDecomp->m_has_flushed = 0;
  pDecomp->m_window_bits = window_bits;

  return MZ_OK;
}

// zlib-compatible inflateInit(): zlib-wrapped stream, default window.
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}

// zlib-compatible inflate(): decompresses from next_in to next_out. The
// first call with MZ_FINISH takes a fast path that decompresses directly
// into the caller's buffer (which must then hold the entire output);
// otherwise output is staged through the internal dictionary and copied
// out across calls. Returns MZ_OK, MZ_STREAM_END, MZ_DATA_ERROR,
// MZ_BUF_ERROR, or MZ_STREAM_ERROR.
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;

  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;

  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;

  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;

  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);

  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers
    // are large enough to hold the entire compressed/decompressed file.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;

    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;

  // Drain any output left over in the dictionary from a previous call first.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }

  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;

    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;

    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);

    pState->m_dict_avail = (mz_uint)out_bytes;

    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);

    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large to hold the remaining uncompressed
      // data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }

  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

// zlib-compatible inflateEnd(): releases the inflate state.
int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

// zlib-compatible uncompress(): one-shot decompression of pSource into
// pDest. On entry *pDest_len is the capacity; on success it receives the
// decompressed size.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;

  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    // A buffer error with all input consumed means the stream was truncated.
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
                                                            : status;
  }
  *pDest_len = stream.total_out;

  return mz_inflateEnd(&stream);
}

// zlib-compatible zError(): maps a status code to a short description, or
// NULL if the code is unknown.
const char *mz_error(int err) {
  static struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  mz_uint i;
  for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
    if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
  return NULL;
}

#endif // MINIZ_NO_ZLIB_APIS

// ------------------- Low-level Decompression (completely independent from all
// compression API's)

#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)

// The tinfl decompressor is written as a coroutine over a switch statement:
// TINFL_CR_RETURN yields a status to the caller and records a resume point
// (the case label) in r->m_state, so the next tinfl_decompress() call jumps
// straight back to where it left off.
#define TINFL_CR_BEGIN                                                         \
  switch (r->m_state) {                                                        \
    case 0:
#define TINFL_CR_RETURN(state_index, result)                                   \
  do {                                                                         \
    status = result;                                                           \
    r->m_state = state_index;                                                  \
    goto common_exit;                                                          \
    case state_index:;                                                         \
  }                                                                            \
  MZ_MACRO_END
// Yields the same terminal status forever — used for unrecoverable failures
// (and the final DONE), so repeated calls keep returning it.
#define TINFL_CR_RETURN_FOREVER(state_index, result)                           \
  do {                                                                         \
    for (;;) {                                                                 \
      TINFL_CR_RETURN(state_index, result);                                    \
    }                                                                          \
  }                                                                            \
  MZ_MACRO_END
#define TINFL_CR_FINISH }

// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. 
// Decodes one Huffman symbol into 'sym' using table pHuff: a direct lookup
// for codes up to TINFL_FAST_LOOKUP_BITS, falling back to a tree walk for
// longer codes. The fast path pre-loads two bytes; the slow path (near the
// end of input) uses TINFL_HUFF_BITBUF_FILL.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff)                             \
  do {                                                                         \
    int temp;                                                                  \
    mz_uint code_len, c;                                                       \
    if (num_bits < 15) {                                                       \
      if ((pIn_buf_end - pIn_buf_cur) < 2) {                                   \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff);                            \
      } else {                                                                 \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) |           \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8));      \
        pIn_buf_cur += 2;                                                      \
        num_bits += 16;                                                        \
      }                                                                        \
    }                                                                          \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0)                                                                     \
      code_len = temp >> 9, temp &= 511;                                       \
    else {                                                                     \
      code_len = TINFL_FAST_LOOKUP_BITS;                                       \
      do {                                                                     \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];         \
      } while (temp < 0);                                                      \
    }                                                                          \
    sym = temp;                                                                \
    bit_buf >>= code_len;                                                      \
    num_bits -= code_len;                                                      \
  }                                                                            \
  MZ_MACRO_END

// Core resumable DEFLATE decompressor (RFC 1951), optionally parsing a zlib
// (RFC 1950) header/adler32 trailer. Implemented as a coroutine: it may be
// called repeatedly, yielding TINFL_STATUS_NEEDS_MORE_INPUT /
// TINFL_STATUS_HAS_MORE_OUTPUT between calls, with all state saved in *r.
// On entry *pIn_buf_size/*pOut_buf_size are the available sizes; on return
// they hold the number of bytes actually consumed/produced. Unless
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is set, the output buffer is
// treated as a power-of-2-sized circular dictionary.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags) {
  // Base lengths/distances and extra-bit counts for DEFLATE length and
  // distance codes (RFC 1951, section 3.2.5).
  static const int s_length_base[31] = {
      3,  4,  5,  6,  7,  8,  9,  10,  11,  13,  15,  17,  19,  23, 27, 31,
      35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0,  0};
  static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
                                         1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
                                         4, 4, 5, 5, 5, 5, 0, 0, 0};
  static const int s_dist_base[32] = {
      1,    2,    3,    4,    5,    7,     9,     13,    17,  25,   33,
      49,   65,   97,   129,  193,  257,   385,   513,   769, 1025, 1537,
      2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0,   0};
  static const int s_dist_extra[32] = {0, 0,  0,  0,  1,  1,  2,  2,
                                       3, 3,  4,  4,  5,  5,  6,  6,
                                       7, 7,  8,  8,  9,  9,  10, 10,
                                       11, 11, 12, 12, 13, 13};
  // Order in which code-length code lengths are transmitted (RFC 1951).
  static const mz_uint8 s_length_dezigzag[19] = {
      16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
  // Minimum symbol counts for the literal/length, distance, and code-length
  // tables respectively.
  static const int s_min_table_sizes[3] = {257, 1, 4};

  tinfl_status status = TINFL_STATUS_FAILED;
  mz_uint32 num_bits, dist, counter, num_extra;
  tinfl_bit_buf_t bit_buf;
  const mz_uint8 *pIn_buf_cur = pIn_buf_next,
                 *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
  mz_uint8 *pOut_buf_cur = pOut_buf_next,
           *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
  size_t out_buf_size_mask =
             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
                 ? (size_t)-1
                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
         dist_from_out_buf_start;

  // Ensure the output buffer's size is a power of 2, unless the output buffer
  // is large enough to hold the entire output file (in which case it doesn't
  // matter).
  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
      (pOut_buf_next < pOut_buf_start)) {
    *pIn_buf_size = *pOut_buf_size = 0;
    return TINFL_STATUS_BAD_PARAM;
  }

  // Restore coroutine-local state saved at the last yield.
  num_bits = r->m_num_bits;
  bit_buf = r->m_bit_buf;
  dist = r->m_dist;
  counter = r->m_counter;
  num_extra = r->m_num_extra;
  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
  TINFL_CR_BEGIN

  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 =
      0;
  r->m_z_adler32 = r->m_check_adler32 = 1;
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    // Validate the 2-byte zlib header: checksum, no preset dictionary,
    // deflate method, and a window size our dictionary can satisfy.
    TINFL_GET_BYTE(1, r->m_zhdr0);
    TINFL_GET_BYTE(2, r->m_zhdr1);
    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
                  ((out_buf_size_mask + 1) <
                   (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
    if (counter) {
      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
    }
  }

  // Process one DEFLATE block per iteration until the final-block bit is set.
  do {
    TINFL_GET_BITS(3, r->m_final, 3);
    r->m_type = r->m_final >> 1;
    if (r->m_type == 0) {
      // Stored (uncompressed) block: byte-align, read LEN/NLEN, verify they
      // are complements, then copy LEN raw bytes to the output.
      TINFL_SKIP_BITS(5, num_bits & 7);
      for (counter = 0; counter < 4; ++counter) {
        if (num_bits)
          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
        else
          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
      }
      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
          (mz_uint)(0xFFFF ^
                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
      }
      // First drain any bytes still sitting in the bit buffer.
      while ((counter) && (num_bits)) {
        TINFL_GET_BITS(51, dist, 8);
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        *pOut_buf_cur++ = (mz_uint8)dist;
        counter--;
      }
      // Then bulk-copy directly from the input buffer.
      while (counter) {
        size_t n;
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        while (pIn_buf_cur >= pIn_buf_end) {
          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
          } else {
            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
          }
        }
        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
                          (size_t)(pIn_buf_end - pIn_buf_cur)),
                   counter);
        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
        pIn_buf_cur += n;
        pOut_buf_cur += n;
        counter -= (mz_uint)n;
      }
    } else if (r->m_type == 3) {
      // Block type 3 is reserved/invalid.
      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
    } else {
      if (r->m_type == 1) {
        // Fixed Huffman block: install the static literal/length and
        // distance code lengths defined by RFC 1951.
        mz_uint8 *p = r->m_tables[0].m_code_size;
        mz_uint i;
        r->m_table_sizes[0] = 288;
        r->m_table_sizes[1] = 32;
        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
        for (i = 0; i <= 143; ++i) *p++ = 8;
        for (; i <= 255; ++i) *p++ = 9;
        for (; i <= 279; ++i) *p++ = 7;
        for (; i <= 287; ++i) *p++ = 8;
      } else {
        // Dynamic Huffman block: read HLIT/HDIST/HCLEN (5, 5, and 4 bits),
        // then the code-length code lengths in dezigzag order.
        for (counter = 0; counter < 3; counter++) {
          TINFL_GET_BITS(11, r->m_table_sizes[counter],
                         "\05\05\04"[counter]);
          r->m_table_sizes[counter] += s_min_table_sizes[counter];
        }
        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
          mz_uint s;
          TINFL_GET_BITS(14, s, 3);
          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] =
              (mz_uint8)s;
        }
        r->m_table_sizes[2] = 19;
      }
      // Build decode tables: table 2 (code-length) first, then — for dynamic
      // blocks — decode the literal/length and distance code lengths with it
      // and build tables 0 and 1.
      for (; (int)r->m_type >= 0; r->m_type--) {
        int tree_next, tree_cur;
        tinfl_huff_table *pTable;
        mz_uint i, j, used_syms, total, sym_index, next_code[17],
            total_syms[16];
        pTable = &r->m_tables[r->m_type];
        MZ_CLEAR_OBJ(total_syms);
        MZ_CLEAR_OBJ(pTable->m_look_up);
        MZ_CLEAR_OBJ(pTable->m_tree);
        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
          total_syms[pTable->m_code_size[i]]++;
        // Canonical Huffman: derive the first code of each length.
        used_syms = 0, total = 0;
        next_code[0] = next_code[1] = 0;
        for (i = 1; i <= 15; ++i) {
          used_syms += total_syms[i];
          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
        }
        // An over/under-subscribed code is invalid (a single-symbol code is
        // tolerated).
        if ((65536 != total) && (used_syms > 1)) {
          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
        }
        for (tree_next = -1, sym_index = 0;
             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
          mz_uint rev_code = 0, l, cur_code,
                  code_size = pTable->m_code_size[sym_index];
          if (!code_size) continue;
          cur_code = next_code[code_size]++;
          // Huffman codes are transmitted LSB-first: bit-reverse the code.
          for (l = code_size; l > 0; l--, cur_code >>= 1)
            rev_code = (rev_code << 1) | (cur_code & 1);
          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
            // Short code: replicate (len << 9 | sym) across all fast-lookup
            // slots that share this prefix.
            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
              pTable->m_look_up[rev_code] = k;
              rev_code += (1 << code_size);
            }
            continue;
          }
          // Long code: chain through the binary tree (negative values are
          // internal node references).
          if (0 == (tree_cur = pTable->m_look_up[rev_code &
                                                 (TINFL_FAST_LOOKUP_SIZE -
                                                  1)])) {
            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
                (mz_int16)tree_next;
            tree_cur = tree_next;
            tree_next -= 2;
          }
          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
            tree_cur -= ((rev_code >>= 1) & 1);
            if (!pTable->m_tree[-tree_cur - 1]) {
              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
              tree_cur = tree_next;
              tree_next -= 2;
            } else
              tree_cur = pTable->m_tree[-tree_cur - 1];
          }
          tree_cur -= ((rev_code >>= 1) & 1);
          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
        }
        if (r->m_type == 2) {
          // Decode the run-length-encoded literal/length + distance code
          // lengths (symbols 16/17/18 are repeat codes).
          for (counter = 0;
               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
            mz_uint s;
            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
            if (dist < 16) {
              r->m_len_codes[counter++] = (mz_uint8)dist;
              continue;
            }
            if ((dist == 16) && (!counter)) {
              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
            }
            num_extra = "\02\03\07"[dist - 16];
            TINFL_GET_BITS(18, s, num_extra);
            s += "\03\03\013"[dist - 16];
            TINFL_MEMSET(r->m_len_codes + counter,
                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
            counter += s;
          }
          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
          }
          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
                       r->m_table_sizes[0]);
          TINFL_MEMCPY(r->m_tables[1].m_code_size,
                       r->m_len_codes + r->m_table_sizes[0],
                       r->m_table_sizes[1]);
        }
      }
      // Main literal/match decode loop for this block.
      for (;;) {
        mz_uint8 *pSrc;
        for (;;) {
          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
              ((pOut_buf_end - pOut_buf_cur) < 2)) {
            // Near a buffer boundary: take the careful one-symbol path.
            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
            if (counter >= 256) break; // length/end-of-block symbol
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ = (mz_uint8)counter;
          } else {
            // Fast path: decode up to two literals per iteration with
            // inlined lookups.
            int sym2;
            mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
            if (num_bits < 30) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 4;
              num_bits += 32;
            }
#else
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            counter = sym2;
            bit_buf >>= code_len;
            num_bits -= code_len;
            if (counter & 256) break; // length/end-of-block symbol
#if !TINFL_USE_64BIT_BITBUF
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            bit_buf >>= code_len;
            num_bits -= code_len;

            pOut_buf_cur[0] = (mz_uint8)counter;
            if (sym2 & 256) {
              // Second symbol was a length code: emit one literal, then fall
              // through to match handling.
              pOut_buf_cur++;
              counter = sym2;
              break;
            }
            pOut_buf_cur[1] = (mz_uint8)sym2;
            pOut_buf_cur += 2;
          }
        }
        if ((counter &= 511) == 256) break; // end of block

        // Match: resolve length and distance (base + extra bits).
        num_extra = s_length_extra[counter - 257];
        counter = s_length_base[counter - 257];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(25, extra_bits, num_extra);
          counter += extra_bits;
        }

        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
        num_extra = s_dist_extra[dist];
        dist = s_dist_base[dist];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(27, extra_bits, num_extra);
          dist += extra_bits;
        }

        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
        if ((dist > dist_from_out_buf_start) &&
            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
        }

        pSrc = pOut_buf_start +
               ((dist_from_out_buf_start - dist) & out_buf_size_mask);

        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
          // Copy would run past the output buffer: byte-at-a-time with
          // wrapping, yielding whenever the output fills up.
          while (counter--) {
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ =
                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
                               out_buf_size_mask];
          }
          continue;
        }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
        else if ((counter >= 9) && (counter <= dist)) {
          // Non-overlapping match: copy 8 bytes at a time.
          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
          do {
            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
            pOut_buf_cur += 8;
          } while ((pSrc += 8) < pSrc_end);
          if ((counter &= 7) < 3) {
            if (counter) {
              pOut_buf_cur[0] = pSrc[0];
              if (counter > 1) pOut_buf_cur[1] = pSrc[1];
              pOut_buf_cur += counter;
            }
            continue;
          }
        }
#endif
        // General (possibly overlapping) copy, 3 bytes per iteration.
        do {
          pOut_buf_cur[0] = pSrc[0];
          pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur[2] = pSrc[2];
          pOut_buf_cur += 3;
          pSrc += 3;
        } while ((int)(counter -= 3) > 2);
        if ((int)counter > 0) {
          pOut_buf_cur[0] = pSrc[0];
          if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur += counter;
        }
      }
    }
  } while (!(r->m_final & 1));
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    // Byte-align and read the big-endian adler32 trailer.
    TINFL_SKIP_BITS(32, num_bits & 7);
    for (counter = 0; counter < 4; ++counter) {
      mz_uint s;
      if (num_bits)
        TINFL_GET_BITS(41, s, 8);
      else
        TINFL_GET_BYTE(42, s);
      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
    }
  }
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH

common_exit:
  // Save coroutine state and report bytes consumed/produced.
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    // Update the running adler32 of the bytes produced this call, processing
    // in 5552-byte chunks so s1/s2 can't overflow before the modulo.
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}

// Higher level helper functions.

// Decompresses an in-memory stream into a heap buffer that grows (doubling,
// min 128 bytes) until the whole output fits. Returns the malloc'd buffer
// (caller frees with mz_free/MZ_FREE) and its size in *pOut_len, or NULL on
// failure (corrupt or truncated input, or out of memory).
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE) break;
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}

// Single-call decompression into a caller-provided buffer. Returns the
// number of bytes written, or TINFL_DECOMPRESS_MEM_TO_MEM_FAILED if the
// stream did not decompress completely into out_buf_len bytes.
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  tinfl_init(&decomp);
  status =
      tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                       (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf,
                       &out_buf_len,
                       (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
                                       : out_buf_len;
}

// Decompresses an in-memory stream, delivering output through pPut_buf_func
// in dictionary-sized chunks (the 32KB dictionary is heap-allocated here).
// Returns 1 on success, 0 on failure or if the callback aborts; *pIn_buf_size
// is updated to the number of input bytes consumed.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  size_t in_buf_ofs = 0, dict_ofs = 0;
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status =
        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
                         &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
                         (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                                    TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    if ((dst_buf_size) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}

// ------------------- Low-level Compression (independent from all decompression
// API's)

// Purposely making these tables static for faster init and thread safety.
// Maps (match_len - TDEFL_MIN_MATCH_LEN), 0..255, to its DEFLATE length
// symbol 257..285 (RFC 1951, section 3.2.5).
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
    269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
    273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
    275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
    277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285};

// Number of extra bits for each (match_len - TDEFL_MIN_MATCH_LEN); the last
// entry (length 258) takes 0 extra bits per the DEFLATE spec.
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};

// Maps a match distance < 512 directly to its DEFLATE distance symbol 0..15.
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,
    8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};

// Number of extra distance bits for each match distance < 512.
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};

// Maps (match_dist >> 8) to its distance symbol for distances >= 512
// (symbols 18..29); first two entries are unused for large distances.
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};

// Number of extra distance bits for (match_dist >> 8), distances >= 512.
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};

// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. 
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };

// Clamps code lengths in pNum_codes[] (a histogram of lengths) so none exceeds
// max_code_size, then rebalances until the Kraft sum equals exactly
// 2^max_code_size (i.e. the lengths describe a complete prefix code).
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1) return;
  // Fold all over-long codes down to max_code_size.
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  // Kraft sum scaled by 2^max_code_size.
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  // While oversubscribed: move one max-length code up one level by lengthening
  // a shorter code (standard zlib-style rebalancing).
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}

// Builds the canonical Huffman code table `table_num` of the compressor:
// derives code lengths from symbol frequencies (or reuses preset lengths when
// static_table is true), limits them to code_size_limit, then assigns
// bit-reversed canonical codes into d->m_huff_codes[table_num].
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Lengths are preset (fixed DEFLATE tables); just histogram them.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Collect only symbols that actually occur.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Hand code sizes back out, longest codes to rarest symbols.
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Assign canonical starting codes per length (RFC 1951, 3.2.2).
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    // DEFLATE emits codes LSB-first, so store them bit-reversed.
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}

// Appends `l` bits of `b` to the output bit buffer, flushing whole bytes to
// m_pOutput_buf as they complete.
#define TDEFL_PUT_BITS(b, l)                                       \
  do {                                                             \
    mz_uint bits = b;                                              \
    mz_uint len = l;                                               \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                         \
    d->m_bit_buffer |= (bits << d->m_bits_in);                     \
    d->m_bits_in += len;                                           \
    while (d->m_bits_in >= 8) {                                    \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)                 \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);         \
      d->m_bit_buffer >>= 8;                                       \
      d->m_bits_in -= 8;                                           \
    }                                                              \
  } while (0)

// RLE helper for packing the code-length table: flushes a pending run of
// repeated nonzero code sizes (symbol 16 = "repeat previous 3-6 times").
#define TDEFL_RLE_PREV_CODE_SIZE()                                             \
  {                                                                            \
    if (rle_repeat_count) {                                                    \
      if (rle_repeat_count < 3) {                                              \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                      \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);            \
        while (rle_repeat_count--)                                             \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;         \
      } else {                                                                 \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 16;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_repeat_count - 3);                                  \
      }                                                                        \
      rle_repeat_count = 0;                                                    \
    }                                                                          \
  }

// RLE helper: flushes a pending run of zero code sizes (symbol 17 = 3-10
// zeros, symbol 18 = 11-138 zeros).
#define TDEFL_RLE_ZERO_CODE_SIZE()                                             \
  {                                                                            \
    if (rle_z_count) {                                                         \
      if (rle_z_count < 3) {                                                   \
        d->m_huff_count[2][0] =                                                \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);                  \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0;  \
      } else if (rle_z_count <= 10) {                                          \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 17;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_z_count - 3);                                       \
      } else {                                                                 \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 18;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_z_count - 11);                                      \
      }                                                                        \
      rle_z_count = 0;                                                         \
    }                                                                          \
  }

// Order in which code-length-code lengths are transmitted (RFC 1951, 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16,
17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

// Emits the header of a dynamic-Huffman DEFLATE block: builds the literal/
// length and distance tables from the accumulated symbol counts, RLE-packs
// the combined code-length list, and writes the block header bits.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) always occurs exactly once.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing unused symbols (minimums: 257 lit/len codes, 1 dist code).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  // Concatenate both length lists; they are RLE-coded as one sequence.
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  // RLE-pack the code sizes using symbols 0-18 (see the RLE macros above).
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  // Table 2 codes the packed code-length symbols themselves (max len 7).
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  // BTYPE = 10 (dynamic Huffman).
  TDEFL_PUT_BITS(2, 2);
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  // Find how many code-length-code lengths must be sent (swizzled order).
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the packed code sizes; symbols >= 16 carry 2/3/7 extra bits.
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}

// Emits the header of a static-Huffman block: loads the fixed DEFLATE code
// lengths (RFC 1951, 3.2.6) and writes BTYPE = 01.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint i;
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  for (i = 0; i <= 143; ++i) *p++ = 8;
  for (; i <= 255; ++i) *p++ = 9;
  for (; i <= 279; ++i) *p++ = 7;
  for (; i <= 287; ++i) *p++ = 8;
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  TDEFL_PUT_BITS(1, 2);
}

// mz_bitmasks[n] == (1 << n) - 1, masks for extracting n low bits.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
    MINIZ_HAS_64BIT_REGISTERS
// Fast path: Huffman-encodes the buffered LZ codes using a 64-bit bit buffer
// and unaligned 64-bit stores (little-endian only).
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;

// Accumulate without flushing; the 64-bit buffer is drained once per
// iteration below.
#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }

  // Each flag byte covers 8 LZ records: bit set = match, clear = literal.
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match record: 1 length byte + 2 distance bytes.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;

      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);

      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;

      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal; opportunistically emit up to two more literals from the
      // same flag byte while the bit buffer has room.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);

      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);

        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }

    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    // Drain whole bytes of the 64-bit bit buffer with one unaligned store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }

#undef TDEFL_PUT_BITS_FAST

  // Hand the residual bits back to the byte-oriented TDEFL_PUT_BITS path.
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;

  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }

  // End-of-block symbol.
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable path: same LZ-record encoding, one byte at a time via
// TDEFL_PUT_BITS.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;

  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;

      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);

      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }

  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
        // MINIZ_HAS_64BIT_REGISTERS

// Emits one complete compressed block: header (static or dynamic tables)
// followed by the Huffman-coded LZ records.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block)
    tdefl_start_static_block(d);
  else
    tdefl_start_dynamic_block(d);
  return tdefl_compress_lz_codes(d);
}

// Flushes the current LZ record buffer as one DEFLATE block; falls back to a
// stored (raw) block when compression would expand the data. Returns the
// number of output bytes still pending flush, or a negative status.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      // Raw blocks are only possible while the whole block's source bytes are
      // still present in the dictionary window.
      use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
                      (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <=
                          d->m_dict_size;
  // Write straight into the user's buffer when it is large enough; otherwise
  // stage through the internal output buffer.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;

  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;

  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;

  // Finalize the partially-filled flag byte.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);

  // zlib header (CMF/FLG = 0x78 0x01) before the very first block.
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }

  // BFINAL bit.
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);

  // Snapshot the output position so the block can be re-emitted raw/static.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;

  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));

  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // BTYPE = 00 (stored), pad to byte, then LEN and ~LEN.
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    // Copy the uncompressed bytes back out of the circular dictionary.
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }

  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Final block: byte-align, then append the zlib Adler-32 if requested.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Sync flush: empty stored block to byte-align the stream.
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }

  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);

  // Reset per-block state for the next block.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);

  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;

  // Deliver the produced bytes: callback, user buffer, or stage for later.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      // Whatever didn't fit stays staged in m_output_buf for the next call.
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }

  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Fast match finder: probes the hash chain comparing 16 bits at a time via
// unaligned loads. Updates *pMatch_dist / *pMatch_len on improvement.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint
    max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  // Probe budget drops once a reasonably long match is already known.
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01: the 2 bytes that must match at the end of any improving match;
  // s01: the first 2 bytes of the lookahead.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
// Follow one hash-chain link; bail out of the function when the chain ends
// or the candidate is beyond max_dist; break when the tail bytes match.
#define TDEFL_PROBE                                                           \
  next_probe_pos = d->m_next[probe_pos];                                      \
  if ((!next_probe_pos) ||                                                    \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))      \
    return;                                                                   \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                       \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) ==     \
      c01)                                                                    \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    // Compare up to 32 word-pairs (64 bytes == TDEFL_MAX_MATCH_LEN+) at once.
    p = s;
    probe_len = 32;
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full comparison without a mismatch: maximal match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Portable match finder: byte-at-a-time comparison, same contract as above.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  mz_uint8 c0 = d->m_dict[pos + match_len],
           c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE                                                           \
  next_probe_pos = d->m_next[probe_pos];                                      \
  if ((!next_probe_pos) ||                                                    \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))      \
    return;                                                                   \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                       \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                             \
      (d->m_dict[probe_pos + match_len - 1] == c1))                           \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;

  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;

    // Copy new source bytes into the circular dictionary; the first
    // TDEFL_MAX_MATCH_LEN-1 bytes are mirrored past the end so matches
    // never have to wrap.
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }

    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;

    // Main parse loop: match on the 3-byte trigram at cur_pos via a
    // single-probe level-1 hash.
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;

      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        // Trigram hit: extend the match 16 bits at a time (max 64 bytes).
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;

        // Reject tiny matches, and minimum-length matches that are far away
        // (they cost more bits than literals).
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          // Record the match: 1 length byte + 2 distance bytes, flag bit set.
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);

          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));

          cur_match_dist--;

          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);

          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;

          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        // Trigram miss: emit a literal.
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }

      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }

      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;

      // LZ record buffer nearly full: flush a block (save/restore the
      // register-cached state around the call).
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }

    // Fewer than 4 bytes of lookahead left: emit the tail as literals.
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];

      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }

      d->m_huff_count[0][lit]++;

      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;

      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }

  // Write the cached state back to the compressor.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN

// Appends a single literal byte to the LZ record buffer and updates the
// literal/length symbol counts.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  d->m_huff_count[0][lit]++;
}

// Appends a (length, distance) match record to the LZ record buffer and
// updates the length and distance symbol counts.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;

  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));

  d->m_total_lz_bytes += match_len;

  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);

  match_dist -= 1;
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;

  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }

  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
s0 : s1]++;
  /* (tail of tdefl_record_match(), begun just above) */
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}

/* General-purpose (lazy/greedy) compressor path.  Consumes d->m_pSrc /
   d->m_src_buf_left, maintaining the sliding dictionary, hash chains and a
   one-match lazy-parsing lookahead.  Returns MZ_FALSE on a flush error. */
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        /* Mirror the dictionary's first TDEFL_MAX_MATCH_LEN-1 bytes at the
           end so matches can be probed without wrap-around checks. */
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      /* Startup path: fewer than 2 bytes buffered, add bytes one at a time. */
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
      break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      /* RLE mode: only match runs of the immediately preceding byte. */
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c)
            break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    /* Reject matches that are uneconomical or filtered out. */
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      /* Lazy parsing: compare the deferred match against the new one. */
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      /* Defer this match: the next position may yield a longer one. */
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}

/* Copies as much pending compressed output as fits into the caller's output
   buffer, and reports how much input was consumed.  Returns TDEFL_STATUS_DONE
   once finished and fully drained, else TDEFL_STATUS_OKAY. */
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size) {
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  }
  if (d->m_pOut_buf_size) {
    size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  return (d->m_finished && !d->m_output_flush_remaining) ?
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
}

/* Main streaming entry point.  Validates parameters, dispatches to the fast
   or normal compressor, updates the adler-32, and flushes pending output.
   Exactly one of pPut_buf_func / (pOut_buf, pOut_buf_size) must be in use. */
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  /* Parameter sanity: callback output XOR buffer output, no prior error,
     no new data after FINISH was requested, non-NULL buffers when sized. */
  if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  /* The fast path only applies at the lowest probe count with greedy
     parsing and none of the special match modes. */
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d))
      return d->m_prev_return_status;
  } else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d))
      return d->m_prev_return_status;
  }
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
    d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf);
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0)
      return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      /* Full flush resets the dictionary so the next block is independent. */
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

/* Convenience wrapper for callback-style output: compresses a whole buffer
   through the compressor's put-buf callback. */
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

/* Initializes (or re-initializes) a compressor.  flags is a combination of
   the TDEFL_* flags plus the max probe count in the low 12 bits.  The hash
   table is only cleared when deterministic output is requested. */
tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
    MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  /* LZ code buffer starts at offset 1; byte 0 holds the first flags byte. */
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

/* Returns the status of the most recent tdefl_compress() call. */
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

/* Returns the adler-32 of the source data compressed so far. */
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

/* Compresses an in-memory buffer, delivering output through pPut_buf_func.
   Heap-allocates a temporary compressor; returns MZ_FALSE on any failure. */
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func))
    return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY);
  succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

/* Growable (or fixed) output sink used by the mem-to-heap / mem-to-mem
   helpers below. */
typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;  // if false, writes past m_capacity fail
} tdefl_output_buffer;

/* tdefl_put_buf_func_ptr adapter: appends len bytes to a tdefl_output_buffer,
   doubling its capacity (min 128) when expandable. */
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable)
      return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf)
      return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

/* Compresses a buffer into a newly heap-allocated block (caller frees).
   Returns NULL on failure; *pOut_len receives the compressed size. */
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

/* Compresses a buffer into a caller-provided fixed-size buffer.  Returns the
   compressed size, or 0 on failure (including output buffer too small). */
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf)
    return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
/* Probe counts indexed by zlib compression level 0..10. */
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) {
  mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
  if (window_bits > 0)
    comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level)
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  else if (strategy == MZ_FILTERED)
    comp_flags |= TDEFL_FILTER_MATCHES;
  else if (strategy == MZ_HUFFMAN_ONLY)
    comp_flags &= ~TDEFL_MAX_PROBES_MASK;
  else if (strategy == MZ_FIXED)
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  else if (strategy == MZ_RLE)
    comp_flags |= TDEFL_RLE_MATCHES;
  return comp_flags;
}
#endif // MINIZ_NO_ZLIB_APIS

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
                                // aggregate initializer (also supported by GNU
                                // C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
                                // 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
                                // 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
                                // deprecated. Instead, use the ISO C and C++
                                // conformant name: _strdup.
#endif

// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #error "No arvhive APIs" #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || 
defined(__MINGW64__)
/* Secure-CRT wrappers so MZ_FOPEN/MZ_FREOPEN have the classic fopen
   signature on MSVC/MinGW-w64 (fopen_s/freopen_s return an errno_t). */
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream))
    return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
/* Per-platform 64-bit-capable stdio abstraction used by the ZIP code. */
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO

/* ASCII-only tolower; avoids locale-dependent <ctype.h> behavior. */
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))

// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};

/* Simple typed growable array (element size set at runtime). */
typedef struct {
  void *m_p;
  size_t m_size, m_capacity;
  mz_uint m_element_size;
} mz_zip_array;

/* Per-archive internal state: central directory copy, offset indices, and
   the backing file or memory block. */
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;
  mz_zip_array m_central_dir_offsets;
  mz_zip_array m_sorted_central_dir_offsets;
  MZ_FILE *m_pFile;
  void *m_pMem;
  size_t m_mem_size;
  size_t m_mem_capacity;
};

#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]

/* Frees an array's storage and zeroes the struct. */
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(mz_zip_array));
}

/* Grows the array's capacity to at least min_new_capacity; when `growing`
   is set, capacity doubles to amortize repeated pushes. */
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity)
    return MZ_TRUE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity)
      new_capacity *= 2;
  }
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}

/* Reserves capacity without changing the logical size. */
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) {
  if (new_capacity > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) {
  /* Sets the logical size, growing the backing store if needed.  New
     elements are left uninitialized. */
  if (new_size > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
      return MZ_FALSE;
  }
  pArray->m_size = new_size;
  return MZ_TRUE;
}

/* Ensures room for n more elements beyond the current size. */
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) {
  return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}

/* Appends n elements (copied from pElements) to the array. */
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) {
  size_t orig_size = pArray->m_size;
  if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
    return MZ_FALSE;
  memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
/* Converts MS-DOS packed date/time fields (as stored in ZIP headers) to a
   time_t via mktime(); DOS seconds have 2-second resolution. */
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1;
  tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
  tm.tm_mon = ((dos_date >> 5) & 15) - 1;
  tm.tm_mday = dos_date & 31;
  tm.tm_hour = (dos_time >> 11) & 31;
  tm.tm_min = (dos_time >> 5) & 63;
  tm.tm_sec = (dos_time << 1) & 62;
  return mktime(&tm);
}

/* Converts a time_t to MS-DOS packed date/time (local time).  On MSVC a
   localtime_s failure yields zeroed outputs. */
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
#endif
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif

#ifndef MINIZ_NO_STDIO
/* Reads a file's mtime and converts it to DOS date/time fields.  With
   MINIZ_NO_TIME both outputs are zeroed. */
static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0)
    return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
/* Sets a file's access/modification times via utime(). */
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) {
  struct utimbuf t;
  t.actime = access_time;
  t.modtime = modified_time;
  return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO

/* Common reader setup: installs default allocators, switches the archive to
   reading mode, and allocates the zeroed internal state. */
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32));
  return MZ_TRUE;
}

/* Case-insensitive "less than" comparison of two central-directory entries'
   filenames, addressed by their offset indices. */
static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  return (pL == pE) ? (l_len < r_len) : (l < r);
}

#define MZ_SWAP_UINT32(a, b) \
  do {                       \
    mz_uint32 t = a;         \
    a = b;                   \
    b = t;                   \
  }                          \
  MZ_MACRO_END

// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  int start = (size - 2) >> 1, end;
  /* Phase 1: heapify. */
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size)
        break;
      child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }
  /* Phase 2: repeatedly move the max to the end and sift down. */
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end)
        break;
      child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root],
pIndices[child]);
      root = child;
    }
    end--;
  }
}

/* Scans backwards from the end of the archive for the end-of-central-directory
 * record, validates it, reads the entire central directory into memory, and
 * builds the (optionally sorted) per-file offset indices. Returns MZ_FALSE on
 * any structural inconsistency. zip64 archives are rejected. */
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
        break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up once the start of the file is reached, or after scanning the
    // maximum possible trailing comment (0xFFFF) plus one EOCD record.
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Overlap successive reads by 3 bytes so a signature straddling two
    // buffers is still found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  // Multi-disk archives are unsupported: disk numbers must both be 0, or both 1.
  if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
    return MZ_FALSE;
  pZip->m_central_directory_file_ofs = cdir_ofs;
  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;
    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }
    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size)
      return MZ_FALSE;
    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // Reject: stored (method 0) entries whose sizes disagree, nonzero output
      // from zero input, and 0xFFFFFFFF sizes (zip64 marker, unsupported).
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1))
        return MZ_FALSE;
      // The entry's local header + compressed data must fit in the archive.
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }
  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
  return MZ_TRUE;
}

/* Initializes a zip reader that pulls bytes through the user-supplied m_pRead
 * callback. `size` is the total archive size in bytes. */
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead))
    return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_archive_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

/* Read callback for mz_zip_reader_init_mem: copies straight out of the
 * in-memory archive block, clamping reads at the archive end. */
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t s = (file_ofs >= pZip->m_archive_size) ? 0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
  return s;
}

/* Initializes a reader over an archive that lives entirely in memory at pMem.
 * The caller retains ownership of the memory block. */
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
/* Read callback for stdio-backed archives: seeks only when the stream position
 * actually changed, then reads n bytes. Returns the byte count actually read. */
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}

/* Opens pFilename with stdio ("rb"), determines its size, and initializes a
 * reader over it. The file handle is owned by the reader until reader_end. */
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile)
    return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

/* Returns the number of files in the archive (0 if pZip is NULL). */
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  return pZip ? pZip->m_total_files : 0;
}

/* Returns a pointer to file_index's central directory header inside the
 * in-memory central directory, or NULL on bad index/state/mode. */
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index));
}

/* True if the entry's general-purpose bit 0 (traditional encryption) is set. */
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) {
  mz_uint m_bit_flag;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p)
    return MZ_FALSE;
  m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  return (m_bit_flag & 1);
}

/* Heuristically decides whether an entry is a directory: a trailing '/' in the
 * stored filename, or the DOS directory bit in the external attributes. */
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p)
    return MZ_FALSE;
  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }
  // Bugfix: This code was also checking if the internal attribute was non-zero,
  // which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in the
  // low 16-bits, so check for the DOS directory flag and ignore the source OS
  // ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0)
    return MZ_TRUE;
  return MZ_FALSE;
}

/* Fills *pStat with all the metadata for entry file_index: sizes, CRC, method,
 * times, attributes, plus truncated copies of the filename and comment.
 * Returns MZ_FALSE on bad arguments. */
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat))
    return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                                       MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  // The comment follows the filename and the extra field in the record.
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';
  return MZ_TRUE;
}

/* Copies entry file_index's name into pFilename (NUL-terminated, truncated to
 * the buffer) and returns the full length including the terminating NUL. */
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size)
      pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}

/* Compares len bytes of pA/pB; case-sensitive only when the flag asks for it. */
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
    return 0 == memcmp(pA, pB, len);
  for (i = 0; i < len; ++i)
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
      return MZ_FALSE;
  return MZ_TRUE;
}

/* Case-insensitively compares the stored filename of central-dir entry l_index
 * against pR/r_len; <0, 0, >0 with strcmp-like semantics. */
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Equal prefix: the shorter name orders first; otherwise the differing byte.
  return (pL == pE) ?
      (int)(l_len - r_len) : (l - r);
}

/* Binary search over the sorted filename index; returns the file index or -1.
 * Only usable when the sorted index was built (sorting not disabled). */
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp = mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}

/* Finds a file by name (and optional comment). Takes the sorted-index fast
 * path when no special flags/comment are requested; otherwise performs a
 * linear scan honoring IGNORE_PATH and CASE_SENSITIVE. Returns the file
 * index, or -1 if not found. */
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: default flags, no comment filter, and a sorted index exists.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) &&
      (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF)
    return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF)
    return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len)
      continue;
    if (comment_len) {
      // The comment follows the filename and extra field in the record.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment, file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip any leading path components (/, \, or drive :) before comparing.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) && (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}

/* Extracts entry file_index into pBuf (of buf_size bytes) without allocating
 * an output buffer. An optional caller-supplied read buffer is used for
 * non-memory archives. Verifies the CRC32 when actually decompressing. */
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size,
                                              mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;
  if ((buf_size) && (!pBuf))
    return MZ_FALSE;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not
  // always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size;
  if (buf_size < needed_size)
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // File data follows the local header, its filename and its extra field.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }
  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);
  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size)
      return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  do {
    size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      // Refill the input buffer from the archive.
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    // Inflate straight into pBuf; the caller's buffer is the whole output
    // window (non-wrapping mode).
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}

/* Name-based wrapper around mz_zip_reader_extract_to_mem_no_alloc. */
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size);
}

/* Extracts by index into a caller buffer (internal read buffer allocated). */
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0);
}

/* Extracts by name into a caller buffer (internal read buffer allocated). */
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0);
}

/* Extracts entry file_index into a newly allocated heap block. On success
 * returns the block (caller frees it via pZip->m_pFree) and stores its size
 * into *pSize; returns NULL on failure. */
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize)
    *pSize = 0;
  if (!p)
    return NULL;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ?
      comp_size : uncomp_size;
  // On 32-bit size_t targets, refuse allocations that cannot be represented.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize)
    *pSize = (size_t)alloc_size;
  return pBuf;
}

/* Name-based wrapper around mz_zip_reader_extract_to_heap. */
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize)
      *pSize = 0;
    return MZ_FALSE;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}

/* Streams entry file_index to pCallback in chunks, decompressing through a
 * TINFL_LZ_DICT_SIZE circular dictionary buffer when the entry is deflated.
 * Verifies total output size and CRC32 unless raw compressed data was asked. */
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    tinfl_decompressor inflator;
    tinfl_init(&inflator);
    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        // Current window into the circular dictionary buffer.
        mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
            comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Bail out if the output exceeds the size claimed in the headers.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }
  if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  if (!pZip->m_pState->m_pMem)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
  return status == TINFL_STATUS_DONE;
}

/* Name-based wrapper around mz_zip_reader_extract_to_callback. */
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags);
}

#ifndef MINIZ_NO_STDIO
/* Write callback that appends to a stdio FILE; the offset is ignored because
 * extraction emits bytes sequentially. */
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) {
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

/* Extracts entry file_index to the file pDst_filename, setting the entry's
 * modification time on the output file when time support is enabled. */
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename,
                   "wb");
  if (!pFile)
    return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  if (MZ_FCLOSE(pFile) == EOF)
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO

/* Tears down a reader: frees the central directory arrays, closes any stdio
 * file, releases the internal state block, and marks the archive invalid. */
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif // #ifndef MINIZ_NO_STDIO
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
/* Name-based wrapper around mz_zip_reader_extract_to_file. */
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif

// ------------------- .ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

/* Little-endian store helpers used to build zip headers byte-by-byte. */
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
  p[2] = (mz_uint8)(v >> 16);
  p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))

/* Initializes a writer that emits bytes through the user-supplied m_pWrite
 * callback. existing_size bytes are assumed to already precede the archive
 * data. Installs default allocators for any callbacks left NULL. */
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32));
  return MZ_TRUE;
}

/* Write callback that grows a heap block (capacity doubling) and copies the
 * data into it at file_ofs. Returns the number of bytes written, 0 on error. */
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
  if ((!n) || ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) || ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size =
      (size_t)new_size;
  return n;
}

/* Initializes an in-memory (heap-growing) writer. The first
 * size_to_reserve_at_beginning bytes of the archive are left for the caller. */
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
/* Write callback for stdio-backed writers: seeks only when the stream position
 * actually changed, then writes n bytes. */
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

/* Creates pFilename ("wb") and initializes a writer over it, zero-filling any
 * reserved region at the start of the file. */
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Write zeros for the reserved prefix in 4KB chunks.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

/* Converts an open reader into a writer so new files can be appended.
 * pFilename is required for stdio-backed archives (the file is reopened
 * read+write); memory archives are assumed to be heap-resizable. New data is
 * written over the old central directory, which will be re-emitted later. */
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support max
  // size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    if (!pFilename)
      return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}

/* Convenience wrapper: add a memory block with no comment / default attrs. */
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0);
}

/* Cursor state threaded through the compressor's put-buf callback while
 * compressing directly into the archive. */
typedef struct {
  mz_zip_archive *m_pZip;
  mz_uint64 m_cur_archive_file_ofs;
  mz_uint64 m_comp_size;
} mz_zip_writer_add_state;

/* Compressor output callback: writes the produced bytes at the current archive
 * offset and accumulates the running compressed size. */
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf, len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

/* Fills pDst with a complete local directory header for the given entry
 * parameters (version-needed is 2.0 for compressed entries, 0 for stored). */
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ?
20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}

// Fills pDst (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes) with a little-endian
// central directory header for one file entry.
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // Version-needed-to-extract: 2.0 when deflate is used, 0 when stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ?
20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}

// Appends one entry (header + filename + extra + comment + offset) to the
// in-memory central directory arrays, rolling the directory back to its
// original size on any push failure.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir,
                               central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  // Basic ZIP archive filename validity checks: Valid filenames cannot start
  // with a forward slash, cannot contain a drive letter, and cannot use
  // DOS-style backward slashes.
  if (*pArchive_name == '/')
    return MZ_FALSE;
  while (*pArchive_name) {
    if ((*pArchive_name == '\\') || (*pArchive_name == ':'))
      return MZ_FALSE;
    pArchive_name++;
  }
  return MZ_TRUE;
}

// Returns how many zero bytes must be inserted before the next local header so
// that its offset is a multiple of m_file_offset_alignment (0 when alignment
// is disabled). The mask arithmetic assumes m_file_offset_alignment is a
// power of two.
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 n;
  if (!pZip->m_file_offset_alignment)
    return 0;
  n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - n) &
         (pZip->m_file_offset_alignment - 1);
}

// Writes n zero bytes to the archive starting at cur_file_ofs, in 4KB chunks.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char buf[4096];
  memset(buf, 0, MZ_MIN(sizeof(buf), n));
  while (n) {
    mz_uint32 s = MZ_MIN(sizeof(buf), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
      return MZ_FALSE;
    cur_file_ofs += s;
    n -= s;
  }
  return MZ_TRUE;
}

// Adds a memory buffer to the archive. When MZ_ZIP_FLAG_COMPRESSED_DATA is
// set the buffer is already deflated and uncomp_size/uncomp_crc32 describe
// the original data; otherwise the CRC and (optional) compression are
// computed here.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64
local_dir_header_ofs = pZip->m_archive_size,
      cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  // Store (no deflate) when level is 0 or the data is already compressed.
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // A caller-supplied uncomp_size only makes sense for pre-compressed data.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size))
      return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // Reserve space for the alignment padding plus the local header; the real
  // header is written at local_dir_header_ofs after sizes/CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    // Tiny payloads aren't worth deflating.
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
      method = MZ_DEFLATED;
  } else if (buf_size) {
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    // -15 window bits => raw deflate stream (no zlib header), per ZIP spec.
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf,
                               buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now go back and write the real local header over the reserved zeros.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                     local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Streams the file at pSrc_filename into the archive as pArchive_name,
// compressing (unless level is 0) and computing the CRC as it reads.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed data can't be supplied via this file-based path.
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
    return
MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file)
    return MZ_FALSE;
  // Determine the source file size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Tiny payloads aren't worth deflating.
  if (uncomp_size <= 3)
    level = 0;
  // Reserve space for the padding plus the local header; the real header is
  // written at local_dir_header_ofs once sizes/CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Stored: copy the file through in chunks, CRC'ing as we go.
      while (uncomp_remaining) {
        mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                    uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque,
                            cur_archive_file_ofs, pRead_buf, n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflated: feed chunks into tdefl, which writes output through the
      // put-buf callback.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      // -15 window bits => raw deflate stream (no zlib header), per ZIP spec.
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(
            uncomp_remaining, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        // Flush (finish) the deflate stream on the last chunk.
        status = tdefl_compress_buffer(
            pComp, pRead_buf, in_buf_size, uncomp_remaining ?
TDEFL_NO_FLUSH : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now go back and write the real local header over the reserved zeros.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                     local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Copies an already-stored entry (local header + data + optional data
// descriptor) from pSource_zip into pZip without recompressing.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  // mz_uint32-aligned backing storage for the local header bytes.
  mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                              sizeof(mz_uint32) - 1) /
                             sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if
(NULL == (pSrc_central_header =
                mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;
  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;
  // Read and validate the source entry's local header.
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Bytes to copy: filename + extra field + the compressed payload.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  // Buffer must hold at least 4 dwords for a possible data descriptor below.
  if (NULL ==
      (pBuf = pZip->m_pAlloc(
           pZip->m_pAlloc_opaque, 1,
           (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                          MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                 comp_bytes_remaining)))))
    return MZ_FALSE;
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }
  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) !=
        sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    // The descriptor is 4 dwords when it begins with the optional
    // 0x08074b50 signature, otherwise 3 (CRC, comp size, uncomp size).
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF)
    return MZ_FALSE;
  // Copy the source central header, patching in the new local header offset.
  orig_central_dir_size = pState->m_central_dir.m_size;
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  if (pState->m_central_dir.m_size > 0xFFFFFFFF)
    return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  pZip->m_total_files++;
  pZip->m_archive_size =
cur_dst_file_ofs;
  return MZ_TRUE;
}

// Writes the central directory and the end-of-central-directory record, then
// moves the archive into the finalized state.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // no zip64 support yet
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }
  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
    return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
  pZip->m_archive_size += sizeof(hdr);
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}

// Finalizes a heap-backed archive and transfers ownership of the heap buffer
// to the caller via *pBuf/*pSize (the writer's internal pointer is cleared so
// mz_zip_writer_end() won't free it).
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize))
    return MZ_FALSE;
  if (pZip->m_pWrite != mz_zip_heap_write_func)
    return MZ_FALSE;
  if
(!mz_zip_writer_finalize_archive(pZip))
    return MZ_FALSE;
  *pBuf = pZip->m_pState->m_pMem;
  *pSize = pZip->m_pState->m_mem_size;
  pZip->m_pState->m_pMem = NULL;
  pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
  return MZ_TRUE;
}

// Releases all writer state: central directory arrays, the stdio handle (if
// any), and any remaining heap buffer. Valid in both the writing and the
// finalized writing modes.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_bool status = MZ_TRUE;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
       (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif // #ifndef MINIZ_NO_STDIO
  if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
    pState->m_pMem = NULL;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return status;
}

#ifndef MINIZ_NO_STDIO
// One-call helper: creates (or appends to) the zip file at pZip_filename and
// adds pBuf under pArchive_name. Always attempts to finalize so the archive
// on disk stays valid even when the add step fails.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags) {
  mz_bool status, created_new_archive = MZ_FALSE;
  mz_zip_archive zip_archive;
  struct MZ_FILE_STAT_STRUCT file_stat;
  MZ_CLEAR_OBJ(zip_archive);
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
      ((comment_size) && (!pComment)) ||
      ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
  if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
    // Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
      return MZ_FALSE;
    created_new_archive = MZ_TRUE;
  } else {
    // Append to an existing archive.
    if (!mz_zip_reader_init_file(
            &zip_archive, pZip_filename,
            level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return MZ_FALSE;
    if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
      mz_zip_reader_end(&zip_archive);
      return MZ_FALSE;
    }
  }
  status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf,
                                    buf_size, pComment, comment_size,
                                    level_and_flags, 0, 0);
  // Always finalize, even if adding failed for some reason, so we have a valid
  // central directory. (This may not always succeed, but we can try.)
  if (!mz_zip_writer_finalize_archive(&zip_archive))
    status = MZ_FALSE;
  if (!mz_zip_writer_end(&zip_archive))
    status = MZ_FALSE;
  if ((!status) && (created_new_archive)) {
    // It's a new archive and something went wrong, so just delete it.
    int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
    (void)ignoredStatus;
  }
  return status;
}

// Opens the zip at pZip_filename, extracts pArchive_name to a heap buffer
// (size returned via pSize), and returns the buffer or NULL on failure.
// The caller owns the returned buffer.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint flags) {
  int file_index;
  mz_zip_archive zip_archive;
  void *p = NULL;
  if (pSize)
    *pSize = 0;
  if ((!pZip_filename) || (!pArchive_name))
    return NULL;
  MZ_CLEAR_OBJ(zip_archive);
  if (!mz_zip_reader_init_file(
          &zip_archive, pZip_filename,
          flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
    return NULL;
  if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name,
                                              NULL, flags)) >= 0)
    p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
  mz_zip_reader_end(&zip_archive);
  return p;
}
#endif // #ifndef MINIZ_NO_STDIO

#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef __cplusplus
}
#endif

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif // MINIZ_HEADER_FILE_ONLY

/* This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif } // namespace miniz #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ 
#pragma clang diagnostic pop #endif #ifdef __GNUC__ #pragma GCC diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
// Bit-level view of an IEEE-754 single-precision value; bit-field order
// depends on host endianness (MINIZ_LITTLE_ENDIAN reused from miniz).
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// Bit-level view of an IEEE-754 half-precision value (EXR HALF pixel type).
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// Convert half -> float, handling Inf/NaN and denormals.
// Derived from https://gist.github.com/rygorous/2156668
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp =
      0x7c00 << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

// Convert float -> half with round-to-nearest, mapping overflow to signed
// infinity and preserving Inf/NaN.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y  0
// #define IMF_DECREASING_Y  1
// #define IMF_RAMDOM_Y  2
//
// #define IMF_NO_COMPRESSION  0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION  2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION  7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

// Read a NUL-terminated string of at most `len` bytes starting at `ptr`.
// Returns the position just past the '\0', or NULL (with `*s` cleared) when
// no terminator was found within the buffer.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NUL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

// Parse one EXR attribute (NUL-terminated name, NUL-terminated type,
// little-endian uint32 payload length, payload bytes) from `marker`.
// On success `*marker_size` receives the total bytes consumed.
// Returns false on malformed or truncated input. A zero-length payload is
// accepted only for "string" attributes (stored as a single '\0').
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.

      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);

      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);

      data->resize(1);
      (*data)[0] = '\0';

      return true;
    } else {
      return false;
    }
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

// Append one attribute (name '\0' type '\0' LE-length payload) to `out`,
// mirroring the on-disk layout parsed by ReadAttribute().
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

// One entry of the EXR `channels` attribute.
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} ChannelInfo;

// Aggregated, parsed EXR header state (channels, data/display windows,
// compression, optional tile description, raw attribute list).
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  int data_window[4];
  int line_order;
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  // Reset all fields to zero/empty before parsing a new header.
  void clear() {
    channels.clear();
    attributes.clear();

    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
    data_window[3] = 0;
    line_order = 0;
    display_window[0] = 0;
    display_window[1] = 0;
    display_window[2] = 0;
    display_window[3] = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;
  }
};

// Decode the `channels` attribute payload (name '\0' + 16 bytes of
// pixel_type/p_linear/reserved/x_sampling/y_sampling per channel, terminated
// by a '\0') into a ChannelInfo list. Returns false on buffer overrun.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;

    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }

    // Ensure the fixed 16-byte record fits inside the payload.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));

    channels.push_back(info);
  }

  return true;
}

// Serialize a ChannelInfo list into the on-disk `channels` attribute layout
// (inverse of ReadChannelInfo); the output ends with a terminating '\0'.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;  // p_linear (1 byte) + 3 reserved bytes

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';
}

// Deflate-compress one pixel-data block, applying OpenEXR's ZIP preprocess
// (byte interleave split + delta predictor) first. `dst` must be at least
// compressBound(src_size) bytes. Falls back to a raw copy when compression
// would grow the data (Issue 40).
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      // Delta against previous byte, biased so the result stays positive.
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inflate one pixel-data block, then undo the ZIP preprocess (delta
// predictor + byte de-interleave). `uncompressed_size` is in/out, following
// the zlib/miniz uncompress convention. Returns false on inflate failure.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      // Undo the biased delta applied by CompressZip.
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
// Runs of >= MIN_RUN_LENGTH identical bytes are emitted as
// (count-1, byte); literal stretches as (-count, bytes...).
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    // Extend the run while bytes repeat (capped at MAX_RUN_LENGTH).
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressable run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run
      //

      // Extend the literal stretch until a 3-byte repeat begins.
      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      // Negative token: literal stretch of `count` bytes follows.
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Non-negative token: repeat the next byte `count + 1` times.
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

// RLE-compress one pixel-data block, applying the same reorder+delta
// preprocess as CompressZip first. `dst` must hold at least
// (src_size * 3) / 2 bytes. Falls back to a raw copy when compression
// would grow the data (Issue 40).
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// RLE-decompress one pixel-data block and undo the delta+reorder preprocess.
// Returns false on truncated input or when the decoded length does not match
// `uncompressed_size`.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif

#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//

// Per-channel bookkeeping for PIZ (de)compression.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
//  16-bit Haar Wavelet encoding and decoding
//
//  The source code in this file is derived from the encoding
//  and decoding routines written by Christian Rouet for his
//  PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  short as = static_cast<short>(a);
  short bs = static_cast<short>(b);

  short ms = (as + bs) >> 1;  // average (low band)
  short ds = as - bs;         // difference (high band)

  l = static_cast<unsigned short>(ms);
  h = static_cast<unsigned short>(ds);
}

inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  short ls = static_cast<short>(l);
  short hs = static_cast<short>(h);

  int hi = hs;
  int ai = ls + (hi & 1) + (hi >> 1);

  short as = static_cast<short>(ai);
  short bs = static_cast<short>(ai - hi);

  a = static_cast<unsigned short>(as);
  b = static_cast<unsigned short>(bs);
}

//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  // Use the 14-bit (non-modulo) basis when all values fit in 14 bits.
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierachical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  // Must match the basis choice made by wav2Encode for the same `mx`.
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
//  16-bit Huffman compression and decompression.
//
//  The source code in this file is derived from the 8-bit
//  Huffman compression and decompression routines written
//  by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. 
// for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) // // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. 
// std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. 
// std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. // hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. 
  //

  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}

//
// Pack an encoding table:
//  - only code lengths, not actual codes, are stored
//  - runs of zeroes are compressed as follows:
//
//    unpacked        packed
//    --------------------------------
//    1 zero          0       (6 bits)
//    2 zeroes        59
//    3 zeroes        60
//    4 zeroes        61
//    5 zeroes        62
//    n zeroes (6 or more)    63 n-6  (6 + 8 bits)
//

const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;

// Serialize the 6-bit code lengths of hcode[im..iM] into *pcode, applying
// the zero-run compression described above.  Advances *pcode past the
// packed table.
static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            // o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;  // bit accumulator for outputBits
  int lc = 0;       // number of valid bits in c

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      int zerun = 1;

      // Extend the zero run as far as possible (bounded by the table end
      // and the longest encodable run).
      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          // 63 followed by (run length - 6) in 8 bits.
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          // Runs of 2..5 map to the single codes 59..62.
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  // Flush any remaining bits, left-aligned in the final byte.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}

//
// Unpack an encoding table packed by hufPackEncTable():
//

// Returns false on truncated input or on a zero run that would overrun
// the [im, iM] range.  On success, *pcode is advanced past the packed
// table and hcode holds canonical (code, length) pairs.
static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    // Bounds check before consuming the next 6-bit length.
    if (p - *pcode >= ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  // Lengths are now in hcode; replace them with canonical codes.
  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  // Explicit member-wise clear (memset would also work, see below, but
  // this stays valid even if HufDec gains non-POD members).
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//  - short codes (<= HUF_DECBITS) are resolved with a single table access;
//  - long code entry allocations are not optimized, because long codes are
//    unfrequent;
//  - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
                                              //    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      // Grow the secondary-entry array by one.  O(n^2) overall, but per
      // the header comment long codes are rare so this is acceptable.
      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;

    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

// Emit the bits of one Huffman code into the output bit stream.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  // Note: runCount is the number of *additional* repeats; the explicit
  // branch below emits runCount + 1 copies (runCount-- >= 0), matching
  // the RLE branch (one sCode plus runCount repeats emitted by the
  // decoder).
  //

  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in bytes)
     int rlc,                   // i : rl code
     char *out)                 // o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)

  int s = in[0];
  int cs = 0;  // current run length (repeats beyond the first symbol)

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  // Flush trailing bits, left-aligned; the bit count is returned so the
  // decoder knows how many bits of the last byte are valid.
  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
// #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety // Issue 100. if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // 
std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; 
hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) 
    //{
    //  hufFreeDecTable (hdec);
    //  throw;
    //}

    hufFreeDecTable(&hdec.at(0));
  }

  return true;
}

//
// Functions to compress the range of values in the pixel data
//

const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

// Build a 1-bit-per-value occupancy bitmap over the 16-bit input data and
// report the first/last non-zero bitmap bytes.
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;

  for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));

  bitmap[0] &= ~1;  // zero is not explicitly stored in
                    // the bitmap; we assume that the
                    // data always contain zeroes

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}

// Build the forward LUT: each occupied value (and 0) maps to a compact
// index; unoccupied values map to 0.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }

  return k - 1;  // maximum value stored in lut[],
}                // i.e. number of ones in bitmap minus 1

// Build the inverse LUT: compact index -> original 16-bit value.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
  }

  int n = k - 1;

  while (k < USHORT_RANGE) lut[k++] = 0;

  return n;  // maximum k where lut[k] is non-zero,
}            // i.e. number of ones in bitmap minus 1

// Remap every data value through the LUT, in place.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif  // __clang__

#ifdef _MSC_VER
#pragma warning(pop)
#endif

// PIZ-compress the interleaved scanline data in inPtr into outPtr:
// range-compaction (bitmap + LUT), per-channel wavelet encoding, then
// Huffman encoding.  Falls back to a raw copy when compression expands.
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));

  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];

    cd.start = tmpBufferEnd;
    cd.end = cd.start;

    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    // size = number of 16-bit words per sample (1 for HALF, 2 otherwise).
    cd.size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }

  // De-interleave: gather each channel's samples into its contiguous
  // region of tmpBuffer.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //  continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);

  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0),
static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { 
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { 
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else { assert(0); } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, int num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = dst_width * dst_num_lines * num_channels; if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((dst_width & 3U) || (dst_num_lines & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, dst_width, dst_num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = dst_width * dst_num_lines; for (int c = 0; c < num_channels; c++) { // decompress 4x4 pixel block. 
for (int y = 0; y < dst_num_lines; y += 4) { for (int x = 0; x < dst_width; x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * dst_width + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((width & 3U) || (num_lines & 3U)) { return false; } // create input array. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. 
    for (int y = 0; y < num_lines; y += 4) {
      for (int x = 0; x < width; x += 4) {
        float fblock[16];
        // Gather one 4x4 tile of this channel's plane.
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            fblock[j * 4 + i] =
                inPtr[c * image_size + ((y + j) * width + (x + i))];
          }
        }
        zfp_encode_block_float_2(zfp, fblock);
      }
    }
  }

  zfp_stream_flush(zfp);
  (*outSize) = zfp_stream_compressed_size(zfp);

  zfp_stream_close(zfp);
  // NOTE(review): the bitstream `stream` is never stream_close()d here
  // (DecompressZfp does close its stream) — possible resource leak;
  // confirm against the zfp API.

  return true;
}

#endif

//
// -----------------------------------------------------------------
//

// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(
        static_cast<size_t>(width * num_lines) * pixel_data_size));
    size_t tmpBufLen = outBuf.size();

    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);

    if (!ret) {
      return false;
    }

    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int 
**>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... 
for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * 
static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle( reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; 
} else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam 
zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); 
return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aliged. 
use byte-wise copy for safety.#76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupsed data? 
// --- tail of DecodePixelData() (function begins earlier in the file) ---
// Scanline/NONE UINT path: bounds-checked, byte-wise copy + endian swap of
// each 32-bit sample into the caller-provided channel image.
              return false;
            }
            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}

// Decode the pixel data of a single tile.
// Computes the actual tile extent (edge tiles may be narrower/shorter than
// tile_size_x/y), writes it to *width / *height, then forwards to
// DecodePixelData() with y = 0 and line_no = 0 because each tile is decoded
// into its own tile-local image buffers (out_images).
// Returns false when the underlying pixel decode fails.
// NOTE(review): tile_offset_x/y are only validated by assert(); presumably
// callers guarantee in-range offsets — verify for untrusted input.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }
  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}

// Build the per-scanline channel interleave layout.
// For each channel, records its byte offset within one scanline's worth of
// samples (*channel_offset_list) and accumulates the per-pixel byte size over
// all channels (*pixel_data_size) plus the running offset (*channel_offset).
// Returns false on an unknown pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));
  (*pixel_data_size) = 0;
  (*channel_offset) = 0;
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      (*pixel_data_size) += sizeof(unsigned short);
      (*channel_offset) += sizeof(unsigned short);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      (*pixel_data_size) += sizeof(float);
      (*channel_offset) += sizeof(float);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      (*pixel_data_size) += sizeof(unsigned int);
      (*channel_offset) += sizeof(unsigned int);
    } else {
      // ???
      return false;
    }
  }
  return true;
}

// Allocate one image buffer per channel (num_channels pointers, each holding
// data_width * data_height samples). A HALF source channel is allocated as
// float when the caller requested HALF -> FLOAT conversion.
// NOTE(review): malloc() results are not checked here — an OOM would surface
// as a crash in the decode loops; confirm against upstream handling.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // pixel_data_size += sizeof(unsigned short);
      // channel_offset += sizeof(unsigned short);

      // Alloc internal image for half type.
// --- tail of AllocateImage() (function begins on the previous source line) ---
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF channel promoted to FLOAT on request.
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      // pixel_data_size += sizeof(float);
      // channel_offset += sizeof(float);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // pixel_data_size += sizeof(unsigned int);
      // channel_offset += sizeof(unsigned int);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
    }
  }

  return images;
}

// Parse one EXR header from `buf`/`size` into `info`.
// For multipart files an empty header (a single '\0' byte) marks the end of
// the header list; in that case *empty_header is set and TINYEXR_SUCCESS is
// returned without touching `info`.
// Returns a TINYEXR_* error code and appends a message to *err on failure.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }

  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;

  // Defaults / "not yet seen" sentinels (negative values mean unset).
  info->data_window[0] = 0;
  info->data_window[1] = 0;
  info->data_window[2] = 0;
  info->data_window[3] = 0;
  info->line_order = 0;  // @fixme
  info->display_window[0] = 0;
  info->display_window[1] = 0;
  info->display_window[2] = 0;
  info->display_window[3] = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;

  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;

  info->attributes.clear();

  // Read attributes
  size_t orig_size = size;
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // '\0' terminates the attribute list.
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (version->tiled && attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      // NOTE(review): payload size is only checked by assert(); in a release
      // build a malformed "tiles" attribute smaller than 9 bytes would throw
      // from data.at() below — confirm hardening for untrusted files.
      assert(data.size() == 9);
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);
      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);

      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;

    } else if (attr_name.compare("compression") == 0) {
      // Values below PIZ are always supported; PIZ/ZFP depend on build flags.
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }

      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      has_channels = true;

    } else if (attr_name.compare("dataWindow") == 0) {
      // box2i: xmin, ymin, xmax, ymax (little-endian in the file).
      if (data.size() >= 16) {
        memcpy(&info->data_window[0], &data.at(0), sizeof(int));
        memcpy(&info->data_window[1], &data.at(4), sizeof(int));
        memcpy(&info->data_window[2], &data.at(8), sizeof(int));
        memcpy(&info->data_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));

        has_data_window = true;
      }
    } else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window[0], &data.at(0), sizeof(int));
        memcpy(&info->display_window[1], &data.at(4), sizeof(int));
        memcpy(&info->display_window[2], &data.at(8), sizeof(int));
        memcpy(&info->display_window[3], &data.at(12), sizeof(int));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[1]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[2]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->display_window[3]));

        has_display_window = true;
      }
    } else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    } else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
        has_pixel_aspect_ratio = true;
      }
    } else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
        has_screen_window_center = true;
      }
    } else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(
            reinterpret_cast<unsigned int *>(&info->screen_window_width));

        has_screen_window_width = true;
      }
    } else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        // NOTE(review): data.at(0) throws if the attribute payload is empty —
        // presumably ReadAttribute guarantees a non-empty payload; verify.
        // Value buffer is malloc'ed; ownership passes to the attribute list.
        attrib.value = static_cast<unsigned char *>(malloc(data.size()));
        memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
               data.size());
        info->attributes.push_back(attrib);
      }
    }
  }

  // Check if required attributes exist
  {
    std::stringstream ss_err;

    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }

    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }

    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }

    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }

    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }

    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }

    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }

    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }

  // Number of bytes this header consumed (attributes + terminator).
  info->header_len = static_cast<unsigned int>(orig_size - size);
  return TINYEXR_SUCCESS;
}

// C++ HeaderInfo to C EXRHeader conversion.
// Copies scalar fields, deep-copies channel descriptions and pixel-type
// arrays (malloc'ed; freed with the EXRHeader), and shallow-copies custom
// attribute values (pointer copy only — see "Just copy poiner" below).
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window[0] = info.display_window[0];
  exr_header->display_window[1] = info.display_window[1];
  exr_header->display_window[2] = info.display_window[2];
  exr_header->display_window[3] = info.display_window[3];
  exr_header->data_window[0] = info.data_window[0];
  exr_header->data_window[1] = info.data_window[1];
  exr_header->data_window[2] = info.data_window[2];
  exr_header->data_window[3] = info.data_window[3];
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;

  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;

  exr_header->num_channels = static_cast<int>(info.channels.size());

  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    // Note: ParseEXRHeader already caps info.attributes at
    // TINYEXR_MAX_CUSTOM_ATTRIBUTES, so this clamp is defensive.
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }

    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));

    for (size_t i = 0; i < info.attributes.size(); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy poiner
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }

  } else {
    exr_header->custom_attributes = NULL;
  }

  exr_header->header_len = info.header_len;
}

// Decode every chunk (scanline block or tile) of one EXR part into
// exr_image. `offsets` holds the absolute offset of each chunk within the
// EXR memory image starting at `head` (`size` bytes total).
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size,
                       std::string *err) {
  int num_channels = exr_header->num_channels;

  // ZIP/PIZ/ZFP chunks cover multiple scanlines; other types one per chunk.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;

  if ((data_width < 0) || (data_height < 0)) {
    if (err) {
      std::stringstream ss;
      ss << "Invalid data width or data height: " << data_width << ", "
         << data_height << std::endl;
      (*err) += ss.str();
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if ((data_width > threshold) || (data_height > threshold)) {
      if (err) {
        std::stringstream ss;
        ss << "data_with or data_height too large. data_width: " << data_width
           << ", "
           << "data_height = " << data_height << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  size_t num_blocks = offsets.size();

  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  bool invalid_data = false;  // TODO(LTE): Use atomic lock for MT safety.

  if (exr_header->tiled) {
    // value check
    if (exr_header->tile_size_x < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    if (exr_header->tile_size_y < 0) {
      if (err) {
        std::stringstream ss;
        ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }

    size_t num_tiles = offsets.size();  // = # of blocks

    // NOTE(review): calloc arguments appear in (size, count) order rather
    // than the conventional (count, size); the allocation size is the same
    // product either way.
    exr_image->tiles = static_cast<EXRTile *>(
        calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

    int err_code = TINYEXR_SUCCESS;

    // With C++11 threads enabled, tiles are dealt out to worker threads via
    // an atomic counter; otherwise a plain serial loop is compiled. The two
    // loop headers below pair with the matching #if at the loop end.
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
    std::vector<std::thread> workers;
    std::atomic<size_t> tile_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_tiles)) {
      num_threads = int(num_tiles);
    }

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        size_t tile_idx = 0;
        while ((tile_idx = tile_count++) < num_tiles) {
#else
    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
          // Allocate memory for each tile.
          exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
              num_channels, exr_header->channels,
              exr_header->requested_pixel_types, exr_header->tile_size_x,
              exr_header->tile_size_y);

          // Tile chunk layout:
          // 16 byte: tile coordinates
          // 4 byte : data size
          // ~ : data(uncompressed or compressed)
          if (offsets[tile_idx] + sizeof(int) * 5 > size) {
            // TODO(LTE): atomic
            if (err) {
              (*err) += "Insufficient data size.\n";
            }
            err_code = TINYEXR_ERROR_INVALID_DATA;
            break;
          }

          size_t data_size =
              size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
          const unsigned char *data_ptr =
              reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);

          int tile_coordinates[4];
          memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
          tinyexr::swap4(
              reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
          tinyexr::swap4(
              reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
          tinyexr::swap4(
              reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
          tinyexr::swap4(
              reinterpret_cast<unsigned int *>(&tile_coordinates[3]));

          // @todo{ LoD }
          // Only mip/rip level (0, 0) is supported.
          if (tile_coordinates[2] != 0) {
            err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
            break;
          }
          if (tile_coordinates[3] != 0) {
            err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE;
            break;
          }

          int data_len;
          memcpy(&data_len, data_ptr + 16,
                 sizeof(int));  // 16 = sizeof(tile_coordinates)
          tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

          if (data_len < 4 || size_t(data_len) > data_size) {
            // TODO(LTE): atomic
            if (err) {
              (*err) += "Insufficient data length.\n";
            }
            err_code = TINYEXR_ERROR_INVALID_DATA;
            break;
          }

          // Move to data addr: 20 = 16 + 4;
          data_ptr += 20;

          bool ret = tinyexr::DecodeTiledPixelData(
              exr_image->tiles[tile_idx].images,
              &(exr_image->tiles[tile_idx].width),
              &(exr_image->tiles[tile_idx].height),
              exr_header->requested_pixel_types, data_ptr,
              static_cast<size_t>(data_len), exr_header->compression_type,
              exr_header->line_order, data_width, data_height,
              tile_coordinates[0], tile_coordinates[1],
              exr_header->tile_size_x, exr_header->tile_size_y,
              static_cast<size_t>(pixel_data_size),
              static_cast<size_t>(exr_header->num_custom_attributes),
              exr_header->custom_attributes,
              static_cast<size_t>(exr_header->num_channels),
              exr_header->channels, channel_offset_list);

          if (!ret) {
            // TODO(LTE): atomic
            if (err) {
              (*err) += "Failed to decode tile data.\n";
            }
            err_code = TINYEXR_ERROR_INVALID_DATA;
          }

          exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
          exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
          exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
          exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }  // num_thread loop

    for (auto &t : workers) {
      t.join();
    }
#else
    }
#endif

    if (err_code != TINYEXR_SUCCESS) {
      return err_code;
    }

    exr_image->num_tiles = static_cast<int>(num_tiles);
  } else {  // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
        size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
        sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

    // Scanline blocks are distributed across C++11 threads, OpenMP, or a
    // serial loop depending on build flags; the loop braces pair with the
    // matching #if at the loop end.
#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {
#else

#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
          size_t y_idx = static_cast<size_t>(y);

          if (offsets[y_idx] + sizeof(int) * 2 > size) {
            invalid_data = true;
          } else {
            // Scanline chunk layout:
            // 4 byte: scan line
            // 4 byte: data size
            // ~ : pixel data(uncompressed or compressed)
            size_t data_size =
                size_t(size - (offsets[y_idx] + sizeof(int) * 2));
            const unsigned char *data_ptr =
                reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

            int line_no;
            memcpy(&line_no, data_ptr, sizeof(int));
            int data_len;
            memcpy(&data_len, data_ptr + 4, sizeof(int));
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

            if (size_t(data_len) > data_size) {
              invalid_data = true;
            } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
              // Too large value. Assume this is invalid
              // 2**20 = 1048576 = heuristic value.
              invalid_data = true;
            } else if (data_len == 0) {
              // TODO(syoyo): May be ok to raise the threshold for example
              // `data_len < 4`
              invalid_data = true;
            } else {
              // line_no may be negative.
              int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                           (exr_header->data_window[3] + 1));

              int num_lines = end_line_no - line_no;

              if (num_lines <= 0) {
                invalid_data = true;
              } else {
                // Move to data addr: 8 = 4 + 4;
                data_ptr += 8;

                // Adjust line_no with data_window.bmin.y

                // overflow check
                tinyexr_int64 lno =
                    static_cast<tinyexr_int64>(line_no) -
                    static_cast<tinyexr_int64>(exr_header->data_window[1]);
                if (lno > std::numeric_limits<int>::max()) {
                  line_no = -1;  // invalid
                } else if (lno < -std::numeric_limits<int>::max()) {
                  line_no = -1;  // invalid
                } else {
                  line_no -= exr_header->data_window[1];
                }

                if (line_no < 0) {
                  invalid_data = true;
                } else {
                  if (!tinyexr::DecodePixelData(
                          exr_image->images, exr_header->requested_pixel_types,
                          data_ptr, static_cast<size_t>(data_len),
                          exr_header->compression_type, exr_header->line_order,
                          data_width, data_height, data_width, y, line_no,
                          num_lines, static_cast<size_t>(pixel_data_size),
                          static_cast<size_t>(
                              exr_header->num_custom_attributes),
                          exr_header->custom_attributes,
                          static_cast<size_t>(exr_header->num_channels),
                          exr_header->channels, channel_offset_list)) {
                    invalid_data = true;
                  }
                }
              }
            }
          }

#if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
  }

  if (invalid_data) {
    if (err) {
      // NOTE(review): `ss` below is declared but never used.
      std::stringstream ss;
      (*err) += "Invalid data found when decoding pixels.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  // (tail of DecodeChunk) Overwrite `pixel_type` with `requested_pixel_type`:
  // the pixel data has just been decoded into the requested representation.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}

// Rebuild the scanline offset table by walking chunk headers sequentially
// from `marker`. Used when the offset table stored in the file is
// incomplete or zeroed.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for the
// reference behavior.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    // NOTE(review): this bounds data_len against the whole file size only,
    // not against (offset + 8 + data_len); also the check reads data_len
    // before the swap4 below — TODO confirm this is intended (it is a no-op
    // on little-endian hosts).
    if (data_len >= size) {
      return false;
    }

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}

// Decode EXR pixel data located at `marker` (just past the header) into
// `exr_image`. `head` points at the start of the whole EXR memory of `size`
// bytes. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Number of scanlines per chunk depends on the compression scheme.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0];
  if (data_width >= std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_width++;

  int data_height = exr_header->data_window[3] - exr_header->data_window[1];
  if (data_height >= std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_height++;

  if ((data_width < 0) || (data_height < 0)) {
    tinyexr::SetErrorMessage("data width or data height is negative.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Do not allow too large data_width and data_height. header invalid?
  {
    const int threshold = 1024 * 8192;  // heuristics
    if (data_width > threshold) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > threshold) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  // Read offset tables.
  // Number of chunks: explicit `chunkCount` attribute wins; otherwise derive
  // from tile grid (tiled) or scanline block count (scanline).
  size_t num_blocks = 0;

  if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
  } else if (exr_header->tiled) {
    // @todo { LoD }
    // Round the tile counts up so partial tiles at the right/bottom edges
    // are included.
    size_t num_x_tiles = static_cast<size_t>(data_width) /
                         static_cast<size_t>(exr_header->tile_size_x);
    if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
        static_cast<size_t>(data_width)) {
      num_x_tiles++;
    }
    size_t num_y_tiles = static_cast<size_t>(data_height) /
                         static_cast<size_t>(exr_header->tile_size_y);
    if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
        static_cast<size_t>(data_height)) {
      num_y_tiles++;
    }

    num_blocks = num_x_tiles * num_y_tiles;
  } else {
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
  }

  std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);

  for (size_t y = 0; y < num_blocks; y++) {
    tinyexr::tinyexr_uint64 offset;
    // Issue #81: make sure the 8-byte offset entry fits in the buffer.
    if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
      tinyexr::SetErrorMessage("Insufficient data size in offset table.",
                               err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
    tinyexr::swap8(&offset);
    if (offset >= size) {
      tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
    offsets[y] = offset;
  }

  // If line offsets are invalid, we try to reconstruct it.
  // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
  // NOTE(review): offsets[y] is unsigned, so `<= 0` is effectively `== 0`.
  for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] <= 0) {
      // TODO(syoyo) Report as warning?
      // if (err) {
      //  stringstream ss;
      //  ss << "Incomplete lineOffsets." << std::endl;
      //  (*err) += ss.str();
      //}
      bool ret =
          ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
      if (ret) {
        // OK
        break;
      } else {
        tinyexr::SetErrorMessage(
            "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

#if 1
      // Decode failed: release any partially allocated image data.
      FreeEXRImage(exr_image);
#else
      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
#endif
    }

    return ret;
  }
}

// Collect the set of unique layer names found in the channel list
// ("layer.channel" naming convention). A name qualifies as a layer prefix
// only when the '.' is neither the first nor the last character.
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  // Naive implementation
  // Group channels by layers
  // go over all channel names, split by periods
  // collect unique names
  layer_names.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string full_name(exr_header.channels[c].name);
    const size_t pos = full_name.find_last_of('.');
    if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
      full_name.erase(pos);
      if (std::find(layer_names.begin(), layer_names.end(),
                    full_name) == layer_names.end())
        layer_names.push_back(full_name);
    }
  }
}

// A channel's index in the EXR channel list paired with its name stripped of
// the layer prefix.
struct LayerChannel {
  explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
  size_t index;
  std::string name;
};

// Collect the channels belonging to `layer_name` (or, when `layer_name` is
// empty, all channels with any layer prefix removed). The layer prefix is
// stripped from each returned channel name.
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string ch_name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      const size_t pos = ch_name.find_last_of('.');
      if (pos != std::string::npos && pos < ch_name.size()) {
        ch_name = ch_name.substr(pos + 1);
      }
    } else {
      // Channel belongs to the layer only when the name starts with
      // "<layer_name>.".
      const size_t pos = ch_name.find(layer_name + '.');
      if (pos == std::string::npos) continue;
      if (pos == 0) {
        ch_name = ch_name.substr(layer_name.size() + 1);
      }
    }
    LayerChannel ch(size_t(c), ch_name);
    channels.push_back(ch);
  }
}

}  // namespace tinyexr

// Public API: enumerate the layer names present in an EXR file.
// On success, *layer_names is a malloc'ed array of strdup'ed strings;
// the caller owns and frees both the strings and the array.
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
              const char **err) {
  EXRVersion exr_version;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
  if (ret != TINYEXR_SUCCESS) {
    FreeEXRHeader(&exr_header);
    return ret;
  }

  std::vector<std::string> layer_vec;
  tinyexr::GetLayers(exr_header, layer_vec);

  (*num_layers) = int(layer_vec.size());
  // NOTE(review): the malloc result and strdup results are not checked for
  // NULL — TODO confirm this is acceptable for this API.
  (*layer_names) = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
  for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
    (*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
    (*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
  }

  FreeEXRHeader(&exr_header);
  return TINYEXR_SUCCESS;
}

// Load the default (unnamed) layer of an EXR file as RGBA float data.
// Thin wrapper over LoadEXRWithLayer() with layername == NULL.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  return LoadEXRWithLayer(out_rgba, width, height, filename,
                          /* layername */ NULL, err);
}

// Load a specific layer of an EXR file as RGBA float data. HALF channels are
// requested as FLOAT. *out_rgba is malloc'ed (4 floats per pixel); the caller
// owns and frees it.
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                     const char *filename, const char *layername,
                     const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      std::stringstream ss;
      ss << "Failed to open EXR file or read version info from EXR file. code("
         << ret << ")";
      tinyexr::SetErrorMessage(ss.str(), err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } // TODO: Probably limit loading to layers (channels) selected by layer index { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; std::vector<std::string> layer_names; tinyexr::GetLayers(exr_header, layer_names); std::vector<tinyexr::LayerChannel> channels; tinyexr::ChannelsInLayer(exr_header, layername == NULL ? "" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int IsEXR(const char *filename) { EXRVersion exr_version; int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. 
`memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); // transfoer `tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to parse EXR version. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. 
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, 
                filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}

// Public API: decode EXR pixel data from a memory buffer containing the whole
// file. `exr_header` must have been filled by a prior ParseEXRHeader* call
// (header_len > 0) so the pixel data offset is known.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}

// Public API: serialize `exr_image` into a newly allocated buffer
// (*memory_out). Returns the number of bytes written, or 0 on error.
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
                            const EXRHeader *exr_header,
                            unsigned char **memory_out, const char **err) {
  if (exr_image == NULL || memory_out == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err);
    return 0;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return 0;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return 0;
  }
#endif

#if TINYEXR_USE_ZFP
  for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
    if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
      tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                               err);
      return 0;
    }
  }
#endif

  std::vector<unsigned char> memory;

  // Header: EXR magic number (0x76 0x2f 0x31 0x01).
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    memory.insert(memory.end(), header, header + 4);
} // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read 
line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < 
static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // TOOD(LTE): C++11 thread // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = 
reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( 
exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel 
data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } size_t totalSize = static_cast<size_t>(offset); { memory.insert( memory.end(), reinterpret_cast<unsigned char 
*>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } if (memory.size() == 0) { tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char *>(malloc(totalSize)); memcpy((*memory_out), &memory.at(0), memory.size()); unsigned char *memory_ptr = *memory_out + memory.size(); for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size()); memory_ptr += data_list[i].size(); } return totalSize; // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return 
TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. 
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); 
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = 
static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. 
{ unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. { unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * 
static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char 
*msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } 
size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. 
(*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. 
{ const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. { // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. 
size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field. 
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. 
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
ninja_filter.c
/* NINJA-OPS: NINJA Is Not Just Another - OTU Picking Solution Short-read filtering, processing, and denoising program. http://ninja-ops.ninja This program performs filtering of the input reads by various means. Compilation information (GCC): Ascribes to std=gnu99 multi-platform. Use -fopenmp if available for SMP Flags: -m64 -O3 -std=gnu99 -fwhole-program [-fopenmp] ninja_filter.c Compilation directives (-D ...) exist to change k-mer behavior. USE_QSORT may be set to use the faster sort in qsort.h PACKSIZE= may be set for 4, 8, 16, 32, or 64-mers. DO_K_ENDPIECE can be set to enable end-piece consideration in the default k-mer denoising algorithm DO_DEEP_K_DENOISE can be set to use a much stricter k-mer denoising algorithm (also considers endpieces) */ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_QSORT #include "qsort.h" #endif #define NINJA_VER "1.5.1" #define PRINT_USAGE() \ {\ printf( "\nNINJA Is Not Just Another - OTU Picking Solution v" NINJA_VER "\n");\ printf( "Short-read filtering, processing, and denoising program. 
Usage:\n");\ printf( "ninja_filter in_reads.fna [PE in_reads2.fa] out_PREFIX [<trim>] [RC] \n" \ "[D [x[.y]]] [CN] [LOG] [ST]\n" ); \ printf("\nINPUT PARAMETERS:\n");\ printf( "in_reads.fa: the reads you wish to process\n");\ printf("[PE in_reads2.fa] (optional): paired-end; include pairs in in_reds2.fa\n"); \ printf( "\n" "OUTPUT PARAMETERS:\n");\ printf( "out_PREFIX: prefix for all output files produced\n");\ printf( "<trim[,trim2]> (optional): the number of bases to keep (comma for R2)\n");\ printf( "[RC] (optional): Reverse-complement input sequences\n");\ printf( "[D] <x.y> (optional): Denoise [duplicates x, kmer duplicates/1000 y]\n");\ printf( "Note: using .y discards reads with k-mers < y*1000 duplicates.\n");\ printf( "[CN] (optional): Convert ambigous bases to A's instead of discarding them\n"); \ printf( "[LOG] (optional): Outputs which sequences were filtered out\n"); \ printf( "[ST] (optional): Run k-mer filter with a single thread\n"); \ exit(2);\ } #define LINELEN UINT16_MAX #ifndef PACKSIZE #define PACKSIZE 32 #endif #if PACKSIZE==64 #define WTYPE __uint128_t #define SEQPACKS 1024 #define RSHFT 126 #elif PACKSIZE==32 #define WTYPE uint64_t #define SEQPACKS 2048 #define RSHFT 62 #elif PACKSIZE==16 #define WTYPE uint32_t #define SEQPACKS 4096 #define RSHFT 30 #elif PACKSIZE==8 #define WTYPE uint16_t #define SEQPACKS 8192 #define RSHFT 14 #elif PACKSIZE==4 #define WTYPE uint8_t #define SEQPACKS 16384 #define RSHFT 6 #endif //#define SEQPACKS LINELEN/PACKSIZE //#define RSHFT (PACKSIZE*2)-2 char WORDTEMP[PACKSIZE+1] = {0}; typedef struct #if PACKSIZE<64 __attribute__ ((__packed__)) #endif { WTYPE word; uint32_t ix; uint16_t length; } SortBlock2; typedef struct KMerX KMerX; struct //#if PACKSIZE<64 __attribute__ ((__packed__)) //#endif KMerX { WTYPE word; uint64_t count; KMerX *left, *right; }; // Explicit thread memory management KMerX ***KBANK = 0; size_t KBANK_MAXK = 10000, KBANK_INITBINS = 100; size_t *KBANK_BIN =0, *KBANK_BINCNT = 0, *KBANK_IX = 0; 
#ifdef USE_QSORT
// Sorts SortBlock2 records by (word, length) using the header-only QSORT macro.
void SB2_qsort(SortBlock2 *arr, unsigned n) {
#define SB2_LT(a,b) ((a->word < b->word) || \
(a->word == b->word && a->length < b->length))
QSORT(SortBlock2, arr, n, SB2_LT);
}
#endif
// Lookup tables, filled in main(): base char -> 2-bit code, and accepted bases.
WTYPE *C2Xb;
char *ACCEPTED;
// 2-bit code -> base character (forward and reverse-complement alphabets).
char *X2C = "ACGTNNNNNNNNNNNNNNNN";
char *X2C_RC = "TGCANNNNNNNNNNNNNNNN";
// Decodes the PACKSIZE 2-bit codes in `num` (most significant first) into
// `word` as bases; returns `word`. `word` is NOT NUL-terminated here.
inline char * num2word(WTYPE num, char * word) {
int go = 0;
for (; go < PACKSIZE; go++) {
WTYPE temp = (WTYPE)num >> RSHFT; // top 2-bit code
word[go] = X2C[temp];
num <<= 2;
}
return word;
}
// Same as num2word but writes complemented bases in reversed positions.
inline char * num2wordRC(WTYPE num, char * word) {
int go = PACKSIZE-1;
for (; go > -1; go--) {
WTYPE temp = (WTYPE)num >> RSHFT;
word[go] = X2C_RC[temp];
num <<= 2;
}
return word;
}
// Expands a packed sequence (`length` bases in ceil(length/PACKSIZE) words of
// Seq) into NUL-terminated `newString`. `word` is unused here.
// NOTE(review): the final num2word writes a full PACKSIZE chars past
// z*PACKSIZE before the terminator truncates — caller's buffer must allow it.
inline char * decodeStringX(WTYPE * Seq, uint16_t length, char *word, char *newString) {
unsigned clumps = length/PACKSIZE;
if (PACKSIZE*clumps < length) ++clumps;
int z = 0;
for (; z < clumps-1; z++) num2word(Seq[z],newString + z*PACKSIZE);
num2word(Seq[clumps-1],newString+z*PACKSIZE);
newString[length] = 0;
return newString;
}
// Reverse-complement decode: fills `newString` back-to-front, using `word`
// as scratch for the partial final clump, then splices its tail to the front.
inline char * decodeStringXRC(WTYPE * Seq, uint16_t length, char *word, char *newString) {
newString[length] = 0;
unsigned clumps = length/PACKSIZE;
if (PACKSIZE*clumps < length) ++clumps;
int z = clumps-2;
for (; z > -1; z--) num2wordRC(Seq[z],newString + length - (z+1) *PACKSIZE);
num2wordRC(Seq[clumps-1],word);
register int fold = length % PACKSIZE; // bases in the partial last clump
if (!fold) fold = PACKSIZE;
memcpy(newString,word+PACKSIZE-fold, fold);
return newString;
}
// strict comparator: orders packed sequences word-by-word, ties broken by length
int xcmp(WTYPE *Seq1, WTYPE *Seq2, uint16_t len1, uint16_t len2) {
unsigned length = len1 < len2 ? len1 : len2; //len1 is min
register unsigned clumps = (unsigned)length/PACKSIZE;
if (PACKSIZE*clumps < length) ++clumps;
int z = 0;
for (; z < clumps; ++z) if (Seq1[z]!=Seq2[z]) return Seq1[z] < Seq2[z] ? -1 : 1;
return len1 < len2 ?
-1 : len1 > len2;
}
// pre-sorted compactor: returns 0 iff Seq1 is a prefix of (or equals) Seq2;
// assumes inputs arrive in sorted order so len1 > len2 means "different".
int ycmp(WTYPE *Seq1, WTYPE *Seq2, uint16_t len1, uint16_t len2) {
if (len1 > len2) return 1; // lexicographic guarantee
int clumps = (unsigned)len1/PACKSIZE;
if (PACKSIZE*clumps < len1) ++clumps;
int z = 0;
for (; z < clumps-1; ++z) if (Seq1[z]!=Seq2[z]) return 1;
// Can differ by length in last clump
if (Seq1[z] == Seq2[z]) return 0;
if (Seq1[z] > Seq2[z]) return 1;
// seq2 must be superset
unsigned shift = len1 % PACKSIZE;
if (shift) shift = (PACKSIZE - shift) * 2; // drop bases beyond len1
return (Seq1[z] >> shift) != (Seq2[z] >> shift);
}
// pre-sorted filter: exact equality test (0 = identical length and content)
int zcmp(WTYPE *Seq1, WTYPE *Seq2, uint16_t len1, uint16_t len2) {
if (len1 != len2) return 1;
register unsigned clumps = (unsigned)len1/PACKSIZE;
if (PACKSIZE*clumps < len1) ++clumps;
int z = 0;
for (; z < clumps; ++z) if (Seq1[z]!=Seq2[z]) return 1;
return 0;
}
#ifndef min
#define min(a, b) ((a)<=(b) ? (a) : (b))
#endif
// ch(i): character at current sort `depth` of the i-th string pointer.
#define ch(i) *(**(a+i) + depth)
#define med3(ia, ib, ic) med3func(a, ia, ib, ic, depth)
#define CUTOFF 10
#define MEDCUT 50
// Swaps two string-pointer slots in the vector
inline void swap(char ***a, int i, int j) {
char **t = *(a+i);
*(a+i) = *(a+j);
*(a+j) = t;
}
// Swaps n consecutive slots starting at i and j.
inline void vecswap(char ***a, int i, int j, int n) {
while (n-->0) swap(a, i++, j++);
}
// Returns index of the median of three entries' chars at `depth`, used in twrqs
inline int med3func(char ***a, int ia, int ib, int ic, int depth) {
int va, vb, vc;
if ((va=ch(ia)) == (vb=ch(ib))) return ia;
if ((vc=ch(ic)) == va || vc == vb) return ic;
return va < vb ?
(vb < vc ? ib : (va < vc ?
ia : ic ) ) :
(vb > vc ? ib : (va < vc ? ia : ic ) );
}
// Insertion sort delegated to by twrqs for small partitions
inline void inssort(char ***a, int n, int depth) {
int i, j;
for (i = 1; i < n; i++) for (j = i; j > 0; j--) {
if (strcmp(**(a+j-1)+depth, **(a+j)+depth) <= 0) break;
swap(a, j, j-1);
}
}
// 3-way Radix Quicksort over a vector of char** (sorts sample-name pointers)
void twrqs(char ***a, unsigned n, int depth) {
if (n < CUTOFF) { inssort(a, n, depth); return; }
unsigned pl = 0, pm = n >> 1, d;
int le, lt, gt, ge, r, v, pn = n-1;
// if large enough, get median of median
if (n > MEDCUT) {
d = n >> 3;
pl = med3(pl, pl+d, pl + (d << 1));
pm = med3(pm-d, pm, pm+d);
pn = med3(pn - (d << 1), pn-d, pn);
}
pm = med3(pl, pm, pn);
swap(a, 0, pm);
v = ch(0); // grab first letter (the pivot character at this depth)
for (le = 1; le < n && ch(le) == v; le++);
if (le == n) { if (v != 0) twrqs(a, n, depth+1); return; }
lt = le; gt = ge = n-1;
// core QS module; partition the data recursively
for (;;) {
for ( ; lt <= gt && ch(lt) <= v; lt++) if (ch(lt) == v) swap(a, le++, lt);
for ( ; lt <= gt && ch(gt) >= v; gt--) if (ch(gt) == v) swap(a, gt, ge--);
if (lt > gt) break;
swap(a, lt++, gt--);
}
r = min(le, lt-le); vecswap(a, 0, lt-r, r);
r = min(ge-gt, n-ge-1); vecswap(a, lt, n-r, r);
twrqs(a, lt-le, depth);
if (v != 0) twrqs(a + lt-le, le + n-ge-1, depth+1);
twrqs(a + n-(ge-gt), ge-gt, depth);
}
// Binary search for `key` in the sorted String[0..sz]; returns its index.
// NOTE(review): on a miss this returns -1, which wraps to SIZE_MAX since the
// return type is size_t — callers index with the result and rely on hits.
inline size_t crBST(char *key, size_t sz, char **String) {
char **p = String;
while (sz) {
size_t w = sz >> 1;
char *ref_s = *(p+w+1), *key_s = key;
while (*ref_s == *key_s++) if (!*ref_s++) return p+w+1-String;
if (*ref_s < *(key_s-1)) { p+=w+1; sz-=w+1; }
else sz = w;
}
char *ref_s = *p, *key_s = key;
while (*ref_s == *key_s++) if (!*ref_s++) return p - String;
return -1;
//return p - String; // replace last 3 lines for unsafe ver
}
// qsort comparator for SortBlock2: by word, then by length.
// NOTE(review): K&R-style (pre-ANSI) parameter declaration — deliberate here.
int SB2Cmp(blk1, blk2) register const void *blk1, *blk2; {
if (((SortBlock2 *)blk1)->word < ((SortBlock2 *)blk2)->word) return -1;
if (((SortBlock2 *)blk1)->word > ((SortBlock2 *)blk2)->word) return 1;
if (((SortBlock2 *)blk1)->length == ((SortBlock2 *)blk2)->length) return 0;
if (((SortBlock2 *)blk1)->length < ((SortBlock2 *)blk2)->length) return -1;
return 1;
}
// Recursive MSD sort of read indices SeqIX[beginRange..endRange) by the packed
// word at `depth`; ties with equal words recurse one word deeper, with reads
// that end at this depth floated to the top of their group first.
void superSort2(uint32_t *SeqIX, WTYPE **base, uint16_t *Lengths, int depth, size_t beginRange, size_t endRange) {
size_t n = endRange - beginRange; // endRange is one after last index
SortBlock2 *BinPtrs = malloc(n * sizeof(*BinPtrs));
if (!BinPtrs) {fputs("Error: memory (sort).\n",stderr); exit(3);}
size_t depthSize = (depth+1) * PACKSIZE; // bases covered through this depth
size_t i = beginRange;
for (; i < endRange; ++i) BinPtrs[i-beginRange] = (SortBlock2){base[SeqIX[i]][depth],SeqIX[i], Lengths[SeqIX[i]] <= depthSize ? Lengths[SeqIX[i]] : 0};
#ifdef USE_QSORT
SB2_qsort(BinPtrs,n);
#else
qsort(BinPtrs, n, sizeof(*BinPtrs), SB2Cmp);
#endif
for (i=beginRange; i < endRange; ++i) SeqIX[i] = BinPtrs[i-beginRange].ix;
free(BinPtrs);
#define CASCADE_MERGE() \
if (i != lastUniq + 1) { \
/* Merge swapping indices for truncated pairs */ \
size_t z = lastUniq; for (; z < i; ++z) { \
if (Lengths[SeqIX[z]] <= depthSize) { \
if (z > lastUniq) { \
/* swap this ix with the ix at lastUniq++ */ \
uint32_t temp = SeqIX[z]; \
SeqIX[z] = SeqIX[lastUniq]; \
SeqIX[lastUniq] = temp; \
} \
++lastUniq; \
} \
} \
/* Spawn a new sort on the remainder */ \
if (lastUniq < i-1) \
superSort2(SeqIX, base, Lengths, depth+1, lastUniq, i); \
}
// Check for duplicates; for each set, move truncations to top
WTYPE curElem = base[SeqIX[beginRange]][depth];
size_t lastUniq = beginRange;
for (i=beginRange + 1; i < endRange; ++i) {
if (base[SeqIX[i]][depth] != curElem) {
CASCADE_MERGE();
curElem = base[SeqIX[i]][depth];
lastUniq = i;
}
}
CASCADE_MERGE(); // end cap
}
// Allocates one KMerX node (count=1) from the calling thread's slab,
// growing the bin array when the current bin fills.
inline KMerX * xalloc(int thread, WTYPE word) {
// KBANK,KBANK_INITBINS,KBANK_MAXK,KBANK_BIN,KBANK_IX
KMerX *Kptr = KBANK[thread][KBANK_BIN[thread]] + KBANK_IX[thread];
*Kptr = (KMerX){word,1,0,0};
if (++KBANK_IX[thread] == KBANK_MAXK) {
// reset the ix, increment bin
KBANK_IX[thread] = 0;
if (++KBANK_BIN[thread] == KBANK_BINCNT[thread]) {
// resize bin array
KBANK[thread] = realloc(KBANK[thread],
sizeof(*KBANK[thread])*(KBANK_BINCNT[thread]*=2));
if (!KBANK[thread]) { puts("ERROR: xalloc 1"); exit(3); }
for (size_t x=KBANK_BINCNT[thread]/2; x<KBANK_BINCNT[thread]; ++x) {
KBANK[thread][x] = malloc(KBANK_MAXK*sizeof(*KBANK[thread][x]));
if (!KBANK[thread][x]) { puts("ERROR: xalloc 2"); exit(3); }
}
}
}
return Kptr;
}
// Shrinks each thread's slab bank down to what was actually used.
// NOTE(review): name says "frees"; it actually reallocs to the used size.
void rexalloc(int threads) {
// dynamically frees tree memory
for (int i = 0; i < threads; ++i) {
KBANK[i] = realloc(KBANK[i],sizeof(*KBANK[i])*KBANK_BIN[i]);
KBANK[i][KBANK_BIN[i]] = realloc(KBANK[i][KBANK_BIN[i]], sizeof(*KBANK[i][KBANK_BIN[i]]) * KBANK_IX[i]);
}
}
//////////// Tree manipulation methods /////////////
// Inserts `word` into thread T's BST (allocating via xalloc) or bumps its
// count; returns whether new node was created — used as a distinct-kmer counter.
int xeTree(KMerX *tree, WTYPE word, int T) {
do {
if (word > tree->word) { // go right
if (!tree->right) { tree->right = xalloc(T,word); return 1; }
tree = tree->right;
}
else if (word < tree->word) { // go left
if (!tree->left) { tree->left = xalloc(T,word); return 1; }
tree = tree->left;
}
} while (word != tree->word);
++tree->count;
return 0;
}
// for repopulating an existing tree: re-hangs `node` (children cleared) as a
// leaf; assumes node->word is not already present on its search path.
void reTree(KMerX *tree, KMerX *node) {
for (;;) {
if (node->word > tree->word) { // go right
if (!tree->right) { node->left = 0; node->right = 0; tree->right = node; return; }
tree = tree->right;
}
else { // go left
if (!tree->left) { node->left = 0; node->right = 0; tree->left = node; return; }
tree = tree->left;
}
}
}
// for merging existing trees (returns if new node added); on a match the
// counts are summed and `node` is abandoned.
int meNode(KMerX *tree, KMerX *node) {
do {
if (node->word > tree->word) { // go right
if (!tree->right) { node->left = 0; node->right = 0; tree->right = node; return 1; // node->count;
}
tree = tree->right;
}
else if (node->word < tree->word) { // go left
if (!tree->left) { node->left = 0; node->right = 0; tree->left = node; return 1; // node->count;
}
tree = tree->left;
}
} while (node->word != tree->word);
tree->count += node->count;
return 0;
}
// find in tree: count for `word`, or 0 when absent
size_t fiTree(KMerX *tree, WTYPE word) {
do {
if (word > tree->word) { // go right
if (!tree->right) return 0;
tree = tree->right;
}
else if (word < tree->word) { // go left
if (!tree->left) return 0;
tree = tree->left;
}
} while (word != tree->word);
return tree->count;
}
// get in tree (known existence)
// NOTE(review): no null checks — dereferences a null child (crash/UB) if the
// word is actually absent; callers must guarantee presence.
size_t giTree(KMerX *tree, WTYPE word) {
do {
if (word > tree->word) tree = tree->right;
else if (word < tree->word) tree = tree->left;
} while (word != tree->word);
return tree->count;
}
// merge trees: folds every node of tree2 into tree, tallying new nodes in *totals
void meTree(KMerX *tree, KMerX *tree2, size_t *totals) {
KMerX *left = tree2->left, *right = tree2->right; // save before meNode clears them
*totals += meNode(tree,tree2);
if (left) meTree(tree, left, totals);
if (right) meTree(tree, right, totals);
}
// Populates array with nodes in balanced (in-order/ascending word) order
void traceBalance(KMerX *tree, KMerX **array, size_t *ix) {
if (tree->left) traceBalance(tree->left, array, ix);
array[(*ix)++] = tree; // if on top, DFS. If mid, IOS, if bot: LFS
if (tree->right) traceBalance(tree->right, array, ix);
}
// Builds a balanced tree from a word-sorted node array (mutual recursion).
void buildBalanceL(KMerX *tree, KMerX **array, size_t sz);
void buildBalanceR(KMerX *tree, KMerX **array, size_t sz);
#define BUILDBALANCE() \
if (!sz) { \
CHILD = *array; \
CHILD->left = 0; \
CHILD->right = 0; \
return; \
} \
size_t ix = sz >> 1; \
CHILD = array[ix]; \
if (ix) buildBalanceL(CHILD,array,ix-1); \
else CHILD->left = 0; \
buildBalanceR(CHILD,array+(ix+1), sz-(ix+1));
// set a branch of the given tree, and recurse with that branch as root
void buildBalanceL(KMerX *tree, KMerX **array, size_t sz) {
#define CHILD tree->left
BUILDBALANCE()
#undef CHILD
}
void buildBalanceR(KMerX *tree, KMerX **array, size_t sz) {
#define CHILD tree->right
BUILDBALANCE()
#undef CHILD
}
/////////// Tree reporting methods ///////////
// Counts nodes in the tree into *ix (in-order walk).
void traceCnt(KMerX *tree, size_t *ix) {
if (tree->left) traceCnt(tree->left, ix);
++*ix;
if (tree->right) traceCnt(tree->right, ix);
}
// Prints every k-mer and its count, in ascending word order.
// NOTE(review): "%I64u" is an MSVC-only length modifier; on other C libraries
// this prints garbage — portable form would be "%llu" with a cast or PRIu64.
void traceTree(KMerX *tree) {
if (tree->left) traceTree(tree->left);
printf("%s\t%I64u\n",num2word(tree->word,WORDTEMP),tree->count);
if
(tree->right) traceTree(tree->right);
}
// Pre-order dump with node depth (same MSVC-only %I64u caveat as traceTree).
void traceTreeDetail(KMerX *tree, int depth) {
printf("%d\t%s\t%I64u\n",depth, num2word(tree->word,WORDTEMP),tree->count);
if (tree->left) traceTreeDetail(tree->left, depth+1);
if (tree->right) traceTreeDetail(tree->right, depth+1);
}
// Accumulates max depth, total depth, and node count over the tree.
// NOTE(review): declared size_t but has no return statement — callers ignore
// the value, but using it would be undefined behavior; should be void.
size_t buildDepth(KMerX *node, int depth, int *depthMax, size_t *depthTot, size_t *count) {
if (node->left) buildDepth(node->left,depth+1,depthMax,depthTot,count);
if (depth > *depthMax) *depthMax = depth;
++(*count);
(*depthTot) += depth;
//printf("%s\t%I64u\n",num2word(tree->word,WORDTEMP),tree->count);
if (node->right) buildDepth(node->right,depth+1,depthMax,depthTot,count);
}
// Prints node count plus max and average depth of the tree (debug aid).
void reportAvMaxDepth(KMerX *tree) {
int depthMax = 0;
size_t count = 0, depthTot=0;
buildDepth(tree,1,&depthMax, &depthTot, &count);
double depthAv = (double)(depthTot)/count;
printf("Total nodes = %lu. Max depth=%d, Avg=%f\n",count,depthMax,depthAv);
}
/////////// Tree sorters/comparators (for balancing) ////////////
// Descending by count (most frequent first).
int tfs_cmp(const void *a, const void *b) {
KMerX *b1 = *(KMerX **)a, *b2 = *(KMerX **)b;
return (b1->count > b2->count) ? -1 : (b1->count < b2->count);
}
void treeFreqSort(KMerX **arr, size_t n) {
#ifdef USE_QSORT
#define NODEFREQGT(a,b) ((*a)->count > (*b)->count)
QSORT(KMerX*, arr, n, NODEFREQGT);
#else
qsort(arr, n, sizeof(*arr), tfs_cmp);
#endif
}
// Ascending by packed word.
int tns_cmp(const void *a, const void *b) {
KMerX *b1 = *(KMerX **)a, *b2 = *(KMerX **)b;
return (b1->word < b2->word) ?
-1 : (b1->word > b2->word);
}
void treeNameSort(KMerX **arr, size_t n) {
#ifdef USE_QSORT
#define NODENAMELT(a,b) ((*a)->word < (*b)->word)
QSORT(KMerX*, arr, n, NODENAMELT);
#else
qsort(arr, n, sizeof(*arr), tns_cmp);
#endif
}
// Main frequency-dependent balancing function: rebuilds the tree so the nodes
// carrying the top 1/2^TOP_SHIFT of total counts sit near the root (balanced
// among themselves), with remaining nodes re-hung beneath. Takes sz = node
// count minus one; returns the new root.
KMerX * balanceTree(KMerX *tree, size_t sz, size_t totalCount) {
// set limits
#define MAX_NODES 1000000
#define TOP_SHIFT 5
if (sz > MAX_NODES) return tree; // too big: skip rebalance
size_t ix = 0;
KMerX **array = malloc(sizeof(*array) * (sz+1));
traceBalance(tree, array, &ix);
// experimental intervention: frequency-first tree construction
treeFreqSort(array, sz+1);
// Adaptive threshold determination
size_t limit = 0;
size_t thres = 0, cutoff = totalCount >> TOP_SHIFT;
while ((thres += array[limit++]->count) < cutoff);
//while (++limit <= sz && array[0]->count/array[limit]->count < 4);
//printf("limit = %lld\n",limit);
tree = array[0]; tree->left = 0; tree->right=0;
if (limit > 2) {
for (size_t i = 1; i < limit; ++i) reTree(tree,array[i]);
// balance the top
KMerX **top = malloc(sizeof(*top)* limit);
for (size_t i = 0; i < limit; ++i) top[i] = array[i];
treeNameSort(top, limit);
ix = (limit-1)/2;
tree = top[ix];
buildBalanceL(tree, top, ix-1);
buildBalanceR(tree, top + (ix+1), limit - 1 - (ix+1));
free (top);
}
else limit = 1;
size_t limit2 = (sz+1);
// Add in the rest
//for (size_t i = limit; i < limit2; ++i) reTree(tree,array[i]);
// Add in the rest v2 (stacade)
//size_t limit3 = (sz+1);
limit2 = (sz+1)/2; // comment out to enable bottomBalance
if (limit2 > limit) {
int L = 1; // alternate +1/-1 to interleave insert order
for (long long i = limit; i < limit2-1; ++i) {
reTree(tree,array[i+L]);
L = -L;
}
if (L==-1) reTree(tree,array[limit2-2]);
else reTree(tree,array[limit2-1]);
}
free(array);
return tree;
}
// SMP tree insersion method: each OpenMP thread inserts its share of queued
// k-mer words into its own tree, rebalancing at doubling thresholds.
inline void clumpParachute(KMerX **Roots, WTYPE *Clumps, size_t *NumsInserted, size_t *TotalCounts, size_t *BalanceThreshes, size_t length) {
//printf("here we go...\n");
#pragma omp parallel for schedule(dynamic,1000)
for (int i
= 0; i < length; ++i) {
int tid = 0;
#ifdef _OPENMP
tid = omp_get_thread_num();
#endif
++TotalCounts[tid];
// First insertion into this thread's tree seeds the root in place.
if (!Roots[tid]->count) { *Roots[tid] = (KMerX){Clumps[i],1,0,0}, NumsInserted[tid]=1; continue; }
NumsInserted[tid] += xeTree(Roots[tid],Clumps[i],tid);
if (NumsInserted[tid] >= BalanceThreshes[tid]) {
//printf("Balancing tree (tid %d) at %lu\n",tid,NumsInserted[tid]);
Roots[tid] = balanceTree(Roots[tid],NumsInserted[tid]-1, TotalCounts[tid]);
/* reportAvMaxDepth(root); */
/* if (balanceThres==65535) {traceTreeDetail(root,0); return 0; } */
BalanceThreshes[tid]=(BalanceThreshes[tid]+1)*2-1; // double threshold
}
}
}
// SMP aggregator: each thread's tree is merged into large tree (thread 0's);
// outputs the distinct-node and total k-mer tallies.
KMerX* mergeParachutes(KMerX **Roots, int T, size_t *NumsInserted, size_t *TotalCounts, size_t *numInserted, size_t *totalCount) {
*totalCount = *TotalCounts; // *TotalCounts;
*numInserted = *NumsInserted;
for (size_t i = 1; i < T; ++i) {
*totalCount += TotalCounts[i];
meTree(*Roots,Roots[i],numInserted);
}
//traceCnt(*Roots,numInserted);
return *Roots;
}
/////////////// K-mer denoisers ////////////////
#ifdef DO_DEEP_K_DENOISE
// Deep variant: scans EVERY overlapping k-mer window (all offsets) and
// returns the rarest count found in the tree.
inline size_t findRarestK(KMerX *tree, WTYPE *seq, uint16_t length) {
size_t min = giTree(tree,*seq), cur;
unsigned offset = 0, basePack = 1;
for (int i = 1, b=length-PACKSIZE+1; i < b; ++i) {
if (++offset == PACKSIZE) cur = giTree(tree, seq[basePack]), ++basePack, offset = 0;
else cur = giTree(tree, (*(seq+basePack-1) << (offset << 1)) + (*(seq+basePack) >> ((PACKSIZE-offset) << 1)));
//printf("%llu [%s=%u:%u], ", cur,num2word(this,WORDTEMP),offset,basePack);
//printf("[%llu] ",cur);
if (cur < min) min = cur;
}
//printf("MIN=%llu\n",min);
return min;
}
#else
// Default variant: checks only the non-overlapping packed words (stride
// PACKSIZE); optionally also the final partial-word "endpiece".
inline size_t findRarestK(KMerX *tree, WTYPE *seq, uint16_t length) {
size_t numPacks = length/PACKSIZE;
//if (numPacks * PACKSIZE < length) ++numPacks;
size_t min = (size_t)-1, cur;
for (int i = 0; i < numPacks; ++i) {
cur = giTree(tree, seq[i]);
//printf("%llu, ", cur);
if (cur < min) min = cur;
}
#ifdef DO_K_ENDPIECE
if (numPacks * PACKSIZE < length) { // handle endpiece
unsigned mod = length % PACKSIZE; // guarantee: never 0
// rightshift by 2xmodulo
WTYPE prev = (seq[numPacks-1] << (2*mod)) + (seq[numPacks] >> (2*(PACKSIZE-mod)));
cur = fiTree(tree, prev); // fiTree: endpiece may be absent from tree
if (cur < min) min = cur;
}
//printf("\n");
#endif
return min;
}
#endif
// Paired-end variant: R1 occupies the first (length - length2) bases and R2
// the rest; a pack straddling the R1/R2 junction is skipped (`ditch`).
size_t findRarestK_PE(KMerX *tree, WTYPE *seq, uint16_t length, uint16_t length2) {
size_t numPacks = (length - length2)/PACKSIZE, min = (size_t)-1, cur;
int ditch = numPacks * PACKSIZE < (length - length2);
int i = 0;
for (; i < numPacks; ++i) { cur = giTree(tree,seq[i]); if (cur < min) min = cur; }
numPacks = length/PACKSIZE;
i += ditch;
for (; i < numPacks; ++i) { cur = giTree(tree,seq[i]); if (cur < min) min = cur; }
return min;
}
// Entry point: parses arguments, ingests (optionally paired) FASTA reads into
// 2-bit packed form, optionally builds k-mer count trees, sorts and compacts
// duplicate reads, and writes the filtered FASTA plus the .db mapping.
int main( int argc, char *argv[] ) {
clock_t start; double cpu_time_used;
start = clock(); // profiler
// Debugging statements
#ifdef DEBUG
printf("type size=%u, shift=%u, pack=%u\n", sizeof(WTYPE), RSHFT, PACKSIZE);
printf("max int size=%u/%u\n",sizeof(unsigned),sizeof(uint64_t));
printf("Size of SortBlock2=%u\n",sizeof(SortBlock2));
#endif
if ( argc < 3 || argc > 12 ) PRINT_USAGE();
int carg = 1;
char *inputFilename = argv[carg++];
char *read2Str = 0;
if (!strcmp(argv[carg],"PE")) ++carg, read2Str = argv[carg++];
printf("%ssing paired-end reads %s\n", read2Str ? "U" : "Not u", read2Str ?
read2Str : "");
char *prefixStr = argv[carg++];
// NOTE(review): this bounds check happens after argv[carg-1] was read.
if (carg > argc) {puts("Error: prefix required."); return 2; }
// Build the five output paths: prefix + suffix.
char *fasta_sx = "_filt.fa", *db_sx = ".db", *dp_sx = "_dupes.txt", *filt_sx = "_filtered.txt", *fasta2_sx = "2_filt.fa";
char *outputFasta = calloc(1,1+strlen(prefixStr)+strlen(fasta_sx)),
*outputFasta2 = calloc(1,1+strlen(prefixStr)+strlen(fasta2_sx)),
*outputDB = calloc(1,1+strlen(prefixStr)+strlen(db_sx)),
*outputDP = calloc(1,1+strlen(prefixStr)+strlen(dp_sx)),
*outputFL = calloc(1,1+strlen(prefixStr)+strlen(filt_sx));
strcpy(outputFasta,prefixStr); strcpy(outputFasta+strlen(prefixStr),fasta_sx);
strcpy(outputFasta2,prefixStr); strcpy(outputFasta2+strlen(prefixStr),fasta2_sx);
strcpy(outputDB,prefixStr); strcpy(outputDB+strlen(prefixStr),db_sx);
strcpy(outputDP,prefixStr); strcpy(outputDP+strlen(prefixStr),dp_sx);
strcpy(outputFL,prefixStr); strcpy(outputFL+strlen(prefixStr),filt_sx);
FILE *fp = fopen(inputFilename, "rb");
FILE *r2 = read2Str ? fopen(read2Str,"rb") : 0;
if (!fp || (read2Str && !r2)) { puts("Invalid input FASTA(s)"); return 2; }
FILE *off = fopen(outputFasta, "wb"), *ofd = fopen(outputDB,"wb");
FILE *off2 = read2Str ? fopen(outputFasta2, "wb") : 0;
if (!off || !ofd || (read2Str && !off2)) { puts("Invalid output prefix; cannot create output file(s)."); return 2; }
FILE *ofdp=0, *ofdpF = 0;
size_t trim = UINT16_MAX, trim2 = UINT16_MAX;
int doRC = 0, doLog = 0;
double filt_i = 0.f;
int copyNumThres = 0; // denoisers
int numThreads = 1, convert_amb = 0;
// Trailing-flag parsing: flags are consumed right-to-left by shrinking argc.
if (strcmp(argv[argc-1],"ST")) { // enable MT if not "ST"
#ifdef _OPENMP
numThreads = omp_get_max_threads();
#endif
} else --argc;
if (!strcmp(argv[argc-1],"CN")) { // convert N's to A's
convert_amb = 1; --argc;
}
if (argc > 3 && !strcmp(argv[argc-1],"LOG")) { // enable per-read log output
doLog = 1;
ofdp=fopen(outputDP,"wb"); ofdpF=fopen(outputFL,"wb");
if (!ofdp || !ofdpF) { puts("Invalid output prefix"); exit(2); }
puts("Log writing enabled.");
--argc;
}
#ifdef _OPENMP
omp_set_num_threads(numThreads);
#else
numThreads = 1;
#endif
// Denoises at default intensity
if (argc > 3 && !strcmp(argv[argc-1],"D")) {
filt_i = 2.f;
printf("Performing NINJA k-mer denoising at DEFAULT intensity: %.0f k-mers\n",filt_i);
--argc;
}
// Denoises at specified intensity in the form x.y
// (integer part = replicate threshold, fraction*1000 = k-mer threshold)
else if (argc > 3 && !strcmp(argv[argc-2],"D")) {
filt_i = atof(argv[argc-1]);
if (filt_i < 0) printf("Invalid denoising intensity (expect #REPS[.###Kmers]).\n");
else {
if (filt_i >= 1.f) {
copyNumThres = filt_i; filt_i -= copyNumThres;
if (copyNumThres > 1 || !read2Str) printf("Performing NINJA replicon-denoising" " at %u %sreads.\n", copyNumThres, read2Str ? "" : "compacted ");
}
if (filt_i) { // Use the decimal remainder as kmer denoising
printf("Performing NINJA k-mer denoising at %.0f k-mers\n", filt_i*1000.f);
filt_i *= 1000;
if (copyNumThres) ++copyNumThres;
}
}
argc -= 2;
}
// Comparator choice: prefix-compaction (ycmp) only for single-end replicon
// denoising; exact-match (zcmp) otherwise.
int (*cmpF)(WTYPE *, WTYPE *, uint16_t, uint16_t) = (copyNumThres && !read2Str) ? &ycmp : &zcmp; //zcmp replaces xcmp
if (!copyNumThres) copyNumThres = filt_i ? -1 : 1;
if (argc > 3 && !strcmp(argv[argc-1],"RC")) {
printf("Reverse complementing the sequences.\n");
doRC = 1; --argc;
}
// Flags for truncation after specified base
if (argc == 4 || (read2Str && argc > 5)) {
char *arg = argv[argc-1]; int tlen = strlen(arg);
char *cix = strchr(arg,',');
trim = atoi(argv[argc-1]) ?: trim; // GNU ?: keeps default on atoi()==0
if (cix) trim2 = atoi(cix+1) ?: trim2;
else trim2 = trim;
printf("Trimming %s sequences to %d bases.\n", read2Str && cix ? "r1" : "input", trim);
if (read2Str && cix) printf("Trimming r2 sequences to %d bases.\n",trim2);
}
// Base-conversion tables: unknown characters map to code 0 ('A') in C2Xb,
// and to 0 (rejected) in ACCEPTED.
C2Xb = calloc(256,sizeof(WTYPE));
C2Xb['a'] = 0; C2Xb['A'] = 0;
C2Xb['c'] = 1; C2Xb['C'] = 1;
C2Xb['g'] = 2; C2Xb['G'] = 2;
C2Xb['t'] = 3; C2Xb['T'] = 3;
ACCEPTED = calloc(256,sizeof(*ACCEPTED));
ACCEPTED['a'] = 1; ACCEPTED['A'] = 1;
ACCEPTED['c'] = 1; ACCEPTED['C'] = 1;
ACCEPTED['g'] = 1; ACCEPTED['G'] = 1;
ACCEPTED['t'] = 1; ACCEPTED['T'] = 1;
// Growable parallel arrays: sample name, optional seq ID, packed read, sizes.
size_t numElem = 1000, ns=0;
char **Samples = malloc(numElem*sizeof(char *));
char **SeqIDs = doLog ? malloc(numElem*sizeof(*SeqIDs)) : 0;
WTYPE **ReadsX = malloc(numElem*sizeof(WTYPE *));
uint16_t *Sizes = calloc(numElem,sizeof(uint16_t));
uint16_t *Sizes2 = read2Str ? calloc(numElem,sizeof(uint16_t)) : 0;
char *line = malloc(LINELEN + 1), *initLine = line, // read up to 65k
*line2 = malloc(LINELEN + 1), *initLine2 = line2;
// MT versions of k-denoisers
size_t queuedClumps = 0, fireThres = 1000000;
#define BAL_THRES 255
KMerX **Roots = 0; WTYPE *Clumps = 0;
size_t *NumsInserted=0, *TotalCounts=0, *BalanceThreshes=0;
//KMerX **KBANK = 0; // GLOBAL VARIABLES
// size_t KBANK_MAXK = 1000, *KBANK_BIN =0, *KBANK_IX = 0;
if (filt_i) {
// Per-thread tree roots, k-mer queue, counters, and slab banks.
printf("Number of threads for k-mer denoise: %d\n",numThreads);
Roots = malloc(numThreads*sizeof(*Roots));
Clumps = malloc(fireThres*sizeof(*Clumps));
NumsInserted = calloc(numThreads,sizeof(*NumsInserted));
TotalCounts = calloc(numThreads,sizeof(*TotalCounts));
BalanceThreshes = malloc(numThreads*sizeof(*BalanceThreshes));
KBANK = malloc(numThreads*sizeof(*KBANK));
KBANK_BIN = calloc(numThreads,sizeof(*KBANK_BIN));
KBANK_BINCNT = malloc(numThreads*sizeof(*KBANK_BINCNT));
KBANK_IX = calloc(numThreads,sizeof(*KBANK_IX));
for (int i = 0; i < numThreads; ++i) {
Roots[i] = malloc(sizeof(*Roots[i]));
*Roots[i] = (KMerX){0,0,0,0};
BalanceThreshes[i] = BAL_THRES;
KBANK[i] = malloc(KBANK_INITBINS*sizeof(*KBANK[i]));
KBANK_BINCNT[i] = KBANK_INITBINS;
for (int j=0; j < KBANK_INITBINS; ++j) {
KBANK[i][j] = malloc(KBANK_MAXK*sizeof(*KBANK[i][j])); // init this bin's kmers
if (!KBANK[i][j]) {puts("error: xalloc 0"); exit(3); }
}
}
}
// Main ingest loop: one header line + one sequence line per read.
size_t ns_amb = 0, n_warned = 0, rejected = 0, totalCnt = 0;
while (line = fgets(line,LINELEN,fp)) {
++totalCnt;
if (ns == numElem) { // grow all parallel arrays together
numElem *= 2;
Samples = realloc(Samples,numElem * sizeof(*Samples));
ReadsX = realloc(ReadsX, numElem * sizeof(*ReadsX));
Sizes = realloc(Sizes, numElem*sizeof(*Sizes));
if (!Samples || !ReadsX || !Sizes) { fputs("Error in resize: memory.\n",stderr); exit(3); }
if (read2Str) {
Sizes2 = realloc(Sizes2, numElem*sizeof(*Sizes2));
if (!Sizes2) {fputs("Error in resize: memory.\n",stderr); exit(3);}
}
if (doLog) {
SeqIDs = realloc(SeqIDs, numElem * sizeof(*SeqIDs));
if (!SeqIDs) {fputs("Error in resize: memory.\n",stderr); exit(3);}
}
}
// Check format consistency
if (*line != '>') { fprintf(stderr,"FASTA error; expected '>' on line %llu\n",totalCnt); exit(2); }
// copy in the sample name up to _ or null minus 1
char *src = line + 1;
while (*src != '_' && *src != ' ' && *src != '\n') ++src;
if (doLog) { // also trace until whitespace for sample id
char *seqID = src;
while (*seqID != ' ' && *seqID != '\n' && *seqID != '\r') ++seqID;
SeqIDs[ns] = malloc(seqID - src + 1);
if (!SeqIDs[ns]) {puts("Out of memory for SeqIDs"); exit(3);}
char *d = SeqIDs[ns]; char *b = src;
while (b < seqID) *d++ = *b++;
*d = 0;
}
Samples[ns] = malloc(src - line);
if (!Samples[ns]) {puts("Not enough Samples[ns] mem"); exit(3);}
char *dest = Samples[ns]; char *beginSample = line + 1;
while (beginSample < src) *dest++ = *beginSample++;
*dest = 0;
// copy in the encoded sequence(s)
if (!(line = fgets(line,LINELEN,fp))) { fputs("FASTA error: unexpected end of file (R1).\n",stderr); exit(2); }
if (*line == '>') { fprintf(stderr,"FASTA error; unexpected '>' on line %llu (R1)\n",totalCnt); exit(2); }
src = line;
register size_t length = strlen(src);
if (src[length-1] == '\n') --length; // lop off newline(s)
if (src[length-1] == '\r') --length; // supports every platform!
if (trim < length) length = trim;
if (length >= UINT16_MAX) {
printf("Warning: truncating read %llu.\n",ns);
length = UINT16_MAX - 1;
}
size_t numPacks;
// Check second sequence
int len2 = 0; char *src2;
if (read2Str) {
fgets(line2,LINELEN,r2); // skip sample
if (!(line2 = fgets(line2,LINELEN,r2))) { fputs("FASTA error: unexpected end of file (R1).\n",stderr); exit(2); }
if (*line == '>') { fprintf(stderr,"FASTA error; unexpected '>' on line %llu (R2)\n",totalCnt); exit(2); }
len2 = strlen(line2);
if (line2[len2-1] == '\n') --len2; // lop off newline(s)
if (line2[len2-1] == '\r') --len2; // supports every platform!
src2 = line2;
if (trim2 < len2) src2 += len2 - trim2, len2 = trim2; // R2 trims from the front
if (len2 >= UINT16_MAX) {
printf("Warning: truncating read 2: %llu.\n",ns);
len2 = UINT16_MAX - 1;
}
Sizes2[ns] = len2;
length += len2; // first length is compounded
}
Sizes[ns] = length;
numPacks = length/PACKSIZE;
if (numPacks * PACKSIZE < length) ++numPacks;
ReadsX[ns] = malloc(numPacks*sizeof(WTYPE));
if (!ReadsX[ns]) {puts("Bad ReadsX[ns] mem"); return 3; }
#define GENERATE_WORD_PRE() \
for (; k < bound; ++k, ++z) { \
clump <<= 2u; \
!ACCEPTED[*src] && (amb=1,++n_warned);\
clump += C2Xb[*src++]; \
if (z == PACKSIZE) *thisPack++ = clump, z = 0;
#define GENERATE_KMER() \
if (k + 2 > PACKSIZE) { \
Clumps[queuedClumps++] = clump; \
if (queuedClumps == fireThres) { \
clumpParachute(Roots,Clumps,NumsInserted, \
TotalCounts,BalanceThreshes,fireThres); \
queuedClumps = 0; \
} \
}
#define GENERATE_WORD_POST() }
// Pack the read 2 bits at a time; optionally queue rolling k-mer words.
int k = 1, z = 2, bound = length - len2, amb = 0;
WTYPE *thisPack = ReadsX[ns];
WTYPE clump = C2Xb[*src];
!ACCEPTED[*src++] && (amb=1,++n_warned);
if (filt_i) GENERATE_WORD_PRE() GENERATE_KMER() GENERATE_WORD_POST()
else GENERATE_WORD_PRE() GENERATE_WORD_POST()
if (read2Str) {
k = 0; // also resets k-mer for read 2
bound = len2;
src = src2;
if (filt_i) GENERATE_WORD_PRE() GENERATE_KMER() GENERATE_WORD_POST()
else GENERATE_WORD_PRE() GENERATE_WORD_POST()
}
numPacks *= PACKSIZE;
// Left-align the partial last word so unused low bits are zero.
if (numPacks > length) *thisPack++ = clump << ((numPacks - length) << 1);
if (amb) { // read contained a non-ACGT base
++ns_amb;
if (!convert_amb) { // discard unless CN converts ambiguity to 'A'
if (doLog) {
++rejected;
fprintf(ofdpF,"%s%s\tAMBIGUOUS\n",Samples[ns],SeqIDs[ns]);
free(SeqIDs[ns]);
}
free(Samples[ns]); free(ReadsX[ns]);
continue; //without incrementing
}
}
++ns;
}
if (n_warned) printf("WARNING: Found %llu sequences with ambiguity" " (%llu ambiguous bases).\n",ns_amb,n_warned);
// Finalize the k-mer count tree (flush queue, merge per-thread trees).
KMerX *master = 0;
if (filt_i) {
if (queuedClumps) clumpParachute(Roots,Clumps,NumsInserted, TotalCounts,BalanceThreshes,queuedClumps);
size_t numInserted = 0, totalCount = 0;
if (numThreads > 1) {
master = mergeParachutes(Roots, numThreads, NumsInserted, TotalCounts,&numInserted, &totalCount);
//master = balanceTree(master,numInserted-1, totalCount);
//master = quickBalance(master,numInserted-1);
}
else {
master = *Roots;
numInserted = *NumsInserted;
totalCount = *TotalCounts;
}
//traceTreeDetail(*Roots,0);
printf("Distinct K-mers found: %lu, Total k-mers: %llu\n",numInserted,totalCount);
#ifdef DEBUG
reportAvMaxDepth(master);
#endif
//rexalloc(numThreads);
}
fclose(fp);
free(line);
// Shrink data structures for more memory
Samples = realloc(Samples,ns * sizeof(*Samples));
ReadsX = realloc(ReadsX, ns * sizeof(*ReadsX));
Sizes = realloc(Sizes, ns * sizeof(*Sizes));
if (read2Str) Sizes2 = realloc(Sizes2, ns * sizeof(*Sizes2));
if (doLog) SeqIDs = realloc(SeqIDs, ns * sizeof(*SeqIDs));
printf("Number of sequences: %u\n",ns + ns_amb);
if (ns > UINT32_MAX) {puts("Too many sequences (>4 Bn)."); return 4;}
printf("Total reads considered: %u\n",ns);
#ifdef PROFILE
printf("->Short read parse: %f\n", ((double) (clock() - start)) / CLOCKS_PER_SEC);
start = clock();
#endif
// Create index structure for sequences read (in 32-bit)
uint32_t *SeqIX = malloc(ns * sizeof(*SeqIX));
size_t k = 0;
for (; k < ns; ++k) SeqIX[k] = k;
superSort2(SeqIX, ReadsX, Sizes, 0,0,ns);
printf("Reads sorted.\n");
// Deduplicate the sample names into SmpDD (sorted, x entries).
char ***smpSrt = malloc(ns * sizeof(*smpSrt)),
**SmpDD = malloc(ns * sizeof(*SmpDD));
if (!smpSrt || !SmpDD) { fprintf(stderr,"Out of post-memory: parray.\n"); exit(3); }
for (k=0; k < ns; ++k) smpSrt[k] = &Samples[k];
twrqs(smpSrt, ns, 0);
*SmpDD = **smpSrt; // store first sample
unsigned x = 1;
for (k=1; k < ns; ++k) if (strcmp(*smpSrt[k-1],*smpSrt[k])) SmpDD[x++] = *smpSrt[k];
free(smpSrt);
SmpDD = realloc(SmpDD,sizeof(char*)*x);
printf("%d Samples found.\n",x);
if (x == ns) { // degenerate: every read its own sample; collapse to one
puts("*************************************");
puts("* WARNING!! WARNING!! WARNING!! *");
puts("* No. of samples = no. of reads *");
puts("* Casting # of samples to 1. *");
puts("*************************************");
x = 1, *SmpDD = "AllSamps";
}
fprintf(ofd, "%u\n", x);
for (k=0; k < x; ++k) fprintf(ofd,"%s\n", SmpDD[k]);
#ifdef PROFILE
printf("->Short read sample prep: %f\n", ((double) (clock() - start)) / CLOCKS_PER_SEC);
start = clock();
#endif
// Create counts array of integers parallel to the unique samples array
unsigned *Counts = calloc(x, sizeof(*Counts));
if (!Counts) {puts("unable to allocate counts"); return 3;}
int64_t i_copyThres = filt_i ? copyNumThres-1 : INT64_MAX;
size_t filt_n = filt_i;
#ifdef DEBUG
printf("copyNumThres=%d, copyThres=%llu, filt_i=%f [%u]\n", copyNumThres,i_copyThres,filt_i,filt_n);
#endif
#define WRITE_SUPPORTED_DUPE() {\
if (copies >= copyNumThres || (copies >= i_copyThres && \
(read2Str ? findRarestK_PE(master,ReadsX[prevIX], Sizes[prevIX],Sizes2[prevIX]) : \
findRarestK(master, ReadsX[prevIX], Sizes[prevIX])) >= filt_n)) { \
/* printf("\nfound rarest K=%llu\n",findRarestK2(master, ReadsX[prevIX], Sizes[prevIX])); */ \
if (doLog) { \
for (unsigned w = lastLogged; w < k; ++w) \
++committed, fprintf(ofdp,"%s%s\t",Samples[SeqIX[w]],SeqIDs[SeqIX[w]]); \
fprintf(ofdp,"\n"); \
lastLogged = k; \
} \
for (int y = 0; y < x; ++y) \
if (Counts[y]) fprintf(ofd,"%u:%u:",y,Counts[y]), Counts[y] = 0; \
fprintf(ofd,"\n"); \
if (doRC) { \
char *bon = decodeStringXRC(ReadsX[prevIX], Sizes[prevIX],word,string); \
if (read2Str) { \
fprintf(off,">%u\n%s\n",rix, bon + Sizes2[prevIX]); \
bon[Sizes2[prevIX]] = 0; \
fprintf(off2,">%u\n%s\n",rix, bon); \
} else fprintf(off,">%u\n%s\n",rix, bon); \
++rix; \
} \
else { \
char *bon = decodeStringX(ReadsX[prevIX], Sizes[prevIX],word,string); \
if (read2Str) { \
fprintf(off2,">%u\n%s\n", rix, bon + Sizes[prevIX] - Sizes2[prevIX]); \
bon[Sizes[prevIX] - Sizes2[prevIX]] = 0; \
fprintf(off,">%u\n%s\n", rix, bon); \
} else fprintf(off,">%u\n%s\n", rix, bon); \
++rix; \
} \
} \
else { \
if (doLog) { \
for (unsigned w = lastLogged; w < k; ++w) \
++rejected, fprintf(ofdpF,"%s%s\tFILTERED\n", \
Samples[SeqIX[w]],SeqIDs[SeqIX[w]]); \
lastLogged = k; \
} \
memset(Counts,0,x*sizeof(unsigned)); \
} \
copies = 1; \
}
// Walk sorted reads: tally per-sample counts, and on each group boundary
// either emit the representative read (thresholds met) or drop the group.
size_t committed = 0; //, rejected = 0; // now defined before main loop
unsigned copies = 1, dupes = 0, rix=0;
char *string = malloc(UINT16_MAX), *word = calloc(PACKSIZE+1,1);
unsigned prevIX, thisIX, lastLogged = 0;
for (k=1; k < ns; ++k) {
prevIX = SeqIX[k-1]; thisIX = SeqIX[k];
if (x==1) ++*Counts;
else ++Counts[crBST(Samples[prevIX],x-1,SmpDD)];
if (cmpF(ReadsX[prevIX],ReadsX[thisIX],Sizes[prevIX], Sizes[thisIX])) WRITE_SUPPORTED_DUPE()
else { ++copies; ++dupes; }
}
prevIX = thisIX;
if (x==1) ++*Counts;
else ++Counts[crBST(Samples[prevIX],x-1,SmpDD)]; // add last count
WRITE_SUPPORTED_DUPE()
if (doLog) printf("Number of reads rejected = %llu, committed = %llu\n", rejected, committed);
puts("Finished.");
#ifdef PROFILE
printf("->Mapping and file writing: %f\n", ((double) (clock() - start)) / CLOCKS_PER_SEC);
start = clock();
#endif
free (SeqIX);
free (string);
return 0;
}
move_particle_utility_pfem2.h
/* ============================================================================== KratosIncompressibleFluidApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
==============================================================================
*/

//
//   Project Name:        Kratos
//   Last Modified by:    $Author: pbecker $
//   Date:                $Date: 2011-09-21 12:30:32 $
//   Revision:            $Revision: 1.0 $
//
//

#if !defined(KRATOS_MOVE_PARTICLE_UTILITY_PFEM2_INCLUDED)
#define KRATOS_MOVE_PARTICLE_UTILITY_PFEM2_INCLUDED
// NOTE(review): the guard macro previously read
// KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_INCLUDED, which did not match the
// name tested by the #if !defined(...) above, so the header was effectively
// unprotected against multiple inclusion. Unified to the checked name.

// System includes
#include <string>
#include <iostream>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/node.h"
/// #include "includes/dof.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "includes/deprecated_variables.h"
#include "includes/global_pointer_variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "processes/node_erase_process.h"
/// #include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "pfem_2_application_variables.h"
#include "pfem_particle_fluidonly.h"
//#include "utilities/enrich_2d_2dofs.h"
#include "utilities/enrichment_utilities.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"

namespace Kratos
{

//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveParticleUtilityPFEM2
{
public:

    typedef SpatialContainersConfigure<TDim>     Configure;
    typedef typename Configure::PointType        PointType;
    //typedef PointType::CoordinatesArrayType    CoordinatesArrayType;
    typedef typename Configure::ContainerType    ContainerType;
    //typedef Configure::PointerType             PointerType;
    typedef
typename Configure::IteratorType             IteratorType;
    typedef typename Configure::ResultContainerType ResultContainerType;
    //typedef Configure::ResultPointerType       ResultPointerType;
    typedef typename Configure::ResultIteratorType  ResultIteratorType;
    // Vector of raw pointers to particles; double-sized per element so that one
    // half holds the "current" particle list and the other half the list being
    // built for the next step (see WATER_PARTICLE_POINTERS_OFFSET usage below).
    typedef PointerVector< PFEM_Particle_Fluid, PFEM_Particle_Fluid*, std::vector<PFEM_Particle_Fluid*> > ParticlePointerVector;
    //typedef Configure::ContactPairType         ContactPairType;
    //typedef Configure::ContainerContactType    ContainerContactType;
    //typedef Configure::IteratorContactType     IteratorContactType;
    //typedef Configure::PointerContactType      PointerContactType;
    //typedef Configure::PointerTypeIterator     PointerTypeIterator;

    KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityPFEM2);

    /// Constructor.
    /// Prepares the utility for a PFEM-2 (particle-in-cell) simulation:
    /// renumbers elements sequentially (serial, order-dependent), computes a
    /// per-node and per-element MEAN_SIZE, allocates the global particle pool
    /// and the per-element particle-pointer vectors, seeds the initial
    /// particles from the elements' Gauss-point positions, and detects whether
    /// MESH_VELOCITY must be taken into account during convection.
    /// @param model_part                  fluid model part the utility operates on (reference kept).
    /// @param maximum_number_of_particles maximum particles stored per element.
    //template<unsigned int TDim>
    MoveParticleUtilityPFEM2(ModelPart& model_part, int maximum_number_of_particles)
        : mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles)
    {
        KRATOS_INFO("MoveParticleUtilityPfem2") << "Initializing utility" << std::endl;

        Check();

        //tools to move the domain, in case we are using a moving domain approach.
        mintialized_transfer_tool=false;
        mcalculation_domain_complete_displacement=ZeroVector(3);
        mcalculation_domain_added_displacement=ZeroVector(3);

        //storing water and air density and their inverses, just in case it is needed for the streamline integration
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        mDENSITY_AIR = CurrentProcessInfo[DENSITY_AIR];
        mDENSITY_WATER = CurrentProcessInfo[DENSITY_WATER];
        //mmaximum_number_of_particles = maximum_number_of_particles;

        //loop in elements to change their ID to their position in the array. Easier to get information later.
        //DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
        for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            ielem->SetId(ii+1);
        }
        mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
        int node_id=0;
        // we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
                array_1d<double,3> position_node;
                double distance=0.0;
                position_node = pnode->Coordinates();
                GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
                //we loop all the nodes to check all the edges
                const double number_of_neighbours = double(rneigh.size());
                for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
                {
                    array_1d<double,3> position_difference;
                    position_difference = inode->Coordinates() - position_node;
                    double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2));
                    //if (current_distance>distance)
                    //    distance=current_distance;
                    distance += current_distance / number_of_neighbours;
                }
                //and we save the largest edge.
                // NOTE(review): despite the comment above, the loop accumulates the
                // *average* neighbour-edge length (the max branch is commented out).
                pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance;
                // NOTE(review): node_id is shared by all OpenMP threads, so
                // mlast_node_id (set below) is whichever node a thread wrote last —
                // nondeterministic under OpenMP; confirm this is intended.
                node_id=pnode->GetId();
            }
        }
        mlast_node_id=node_id;

        //we also calculate the element mean size in the same way, for the courant number
        //also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                double elem_size;
                array_1d<double,3> Edge(3,0.0);
                // squared length of the first edge (node 0 -> node 1) ...
                Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
                elem_size = Edge[0]*Edge[0];
                for (unsigned int d = 1; d < TDim; d++)
                    elem_size += Edge[d]*Edge[d];
                // ... then keep the minimum squared length over all remaining edges
                for (unsigned int i = 2; i < (TDim+1); i++)
                    for(unsigned int j = 0; j < i; j++)
                    {
                        Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
                        double Length = Edge[0]*Edge[0];
                        for (unsigned int d = 1; d < TDim; d++)
                            Length += Edge[d]*Edge[d];
                        if (Length < elem_size) elem_size = Length;
                    }
                elem_size = sqrt(elem_size);
                ielem->SetValue(MEAN_SIZE, elem_size);

                //and the matrix column for the enrichments in the pressure.
                if (TDim==3)
                    ielem->SetValue(ENRICH_LHS_ROW_3D, ZeroVector(4));
                // {
                // Vector & lhs_enrich = ielem->GetValue(ENRICH_LHS_ROW_3D);
                // lhs_enrich.resize(4);
                // lhs_enrich=ZeroVector(4);
                // }
                else
                    ielem->SetValue(ENRICH_LHS_ROW, ZeroVector(3));
                //KRATOS_WATCH(mElemSize)
            }
        }

        //matrix containing the position of the 4/15/45 particles that we will seed at the beggining
        BoundedMatrix<double, 5*(1+TDim), 3 > pos;
        BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
        int particle_id=0;
        mnelems = mr_model_part.Elements().size();

        KRATOS_INFO("MoveParticleUtilityPfem2") << "About to resize vectors" << std::endl;

        //setting the right size to the vector containing the particles assigned to each element
        //particles vector. this vector contains ALL the particles in the simulation.
        mparticles_vector.resize(mnelems*mmaximum_number_of_particles);

        //and this vector contains the current number of particles that are in each element (currently zero)
        mnumber_of_particles_in_elems.resize(mnelems);
        mnumber_of_particles_in_elems=ZeroVector(mnelems);

        //when moving the particles, an auxiliary vector is necessary (to store the previous number)
        mnumber_of_particles_in_elems_aux.resize(mnelems);

        //each element will have a list of pointers to all the particles that are inside.
        //this vector contains the pointers to the vector of (particle) pointers of each element.
        mpointers_to_particle_pointers_vectors.resize(mnelems);

        KRATOS_INFO("MoveParticleUtilityPfem2") << "About to create particles" << std::endl;
        //now we seed: LOOP IN ELEMENTS
        //using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
        for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            ielem->SetValue(FLUID_PARTICLE_POINTERS, ParticlePointerVector( mmaximum_number_of_particles*2) );//, &firstparticle );
            ParticlePointerVector& particle_pointers = ielem->GetValue(FLUID_PARTICLE_POINTERS);
            //now we link the mpointers_to_particle_pointers_vectors to the corresponding element
            mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
            //now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
            //for(int j=0; j<(mmaximum_number_of_particles*2); j++)
            //    particle_pointers.push_back(&firstparticle);
            int & number_of_particles = ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            number_of_particles=0;
            //int & number_of_water_particles = ielem->GetValue(NUMBER_OF_WATER_PARTICLES);

            Geometry< Node<3> >& geom = ielem->GetGeometry();
            //unsigned int elem_id = ielem->Id();
            //mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED
            ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
            //now we seed the particles in the current element
            for (unsigned int j = 0; j < pos.size1(); j++)
            {
                ++particle_id;
                PFEM_Particle_Fluid& pparticle = mparticles_vector[particle_id-1];
                pparticle.X()=pos(j,0);
                pparticle.Y()=pos(j,1);
                pparticle.Z()=pos(j,2);

                pparticle.GetEraseFlag()=false;

                array_1d<float, 3 > & vel = pparticle.GetVelocity();
                float & distance = pparticle.GetDistance();
                noalias(vel) = ZeroVector(3);
                distance = 0.0;

                // interpolate velocity and level-set distance from the element nodes
                for (unsigned int k = 0; k < (TDim+1); k++)
                {
                    noalias(vel) += (N(j, k) * geom[k].FastGetSolutionStepValue(VELOCITY));
                    distance += N(j, k) * geom[k].FastGetSolutionStepValue(DISTANCE);
                }
                // the particle only keeps the sign of the distance (phase flag)
                if (distance <= 0.0) distance = -1.0;
                else distance = 1.0;
                particle_pointers(j) = &pparticle;
                number_of_particles++;
            }
        }

        bool nonzero_mesh_velocity = false;
        //seeing if we have to use the mesh_velocity or not
        for(ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode!=mr_model_part.NodesEnd(); inode++)
        {
            const array_1d<double, 3 > velocity = inode->FastGetSolutionStepValue(MESH_VELOCITY);
            for(unsigned int i = 0; i!=3; i++)
            {
                if (fabs(velocity[i])>1.0e-9)
                    nonzero_mesh_velocity=true;
            }
            if( nonzero_mesh_velocity==true)
                break;
        }
        if ( nonzero_mesh_velocity==true)
            muse_mesh_velocity_to_convect = true; // if there is mesh velocity, then we have to take it into account when moving the particles
        else
            muse_mesh_velocity_to_convect = false; //otherwise, we can avoid reading the values since we know it is zero everywhere (to save time!)

        m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
        KRATOS_INFO("MoveParticleUtilityPfem2") << "Number of particles created : " << m_nparticles << std::endl;
        mparticle_printing_tool_initialized=false;
    }

    /// Destructor. The utility owns no heap resources directly; smart pointers
    /// and member containers clean themselves up.
    ~MoveParticleUtilityPFEM2()
    {}

    /// Builds the dynamic bins search structure over the elements of the model
    /// part, used later to locate the element containing a given particle.
    void MountBin()
    {
        KRATOS_TRY

        //copy the elements to a new container, as the list will
        //be shuffled duringthe construction of the tree
        ContainerType& rElements        = mr_model_part.ElementsArray();
        IteratorType it_begin           = rElements.begin();
        IteratorType it_end             = rElements.end();
        //const int number_of_elem       = rElements.size();
        typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
        paux.swap(mpBinsObjectDynamic);
        //BinsObjectDynamic<Configure>  mpBinsObjectDynamic(it_begin, it_end );
        KRATOS_INFO("MoveParticleUtilityPfem2") << "Finished mounting Bins" << std::endl;

        KRATOS_CATCH("")
    }

    //TOOL TO TRANSFER INFORMATION INITIALLY FROM ONE DOMAIN TO OTHER.
void IntializeTransferTool(ModelPart* topographic_model_part, array_1d<double, 3 > initial_domains_offset, bool ovewrite_particle_data) //mtopographic_model_part(topographic_model_part) { KRATOS_TRY mintialized_transfer_tool=true; const unsigned int max_results = 1000; std::cout << "initializing transfer utility" << std::endl; ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); mcalculation_domain_complete_displacement=initial_domains_offset; mtopographic_model_part_pointer = topographic_model_part; //copying the pointer. //CONSTRUCTING BIN STRUCTURE ContainerType& rElements_topo = mtopographic_model_part_pointer->ElementsArray(); IteratorType it_begin_topo = rElements_topo.begin(); IteratorType it_end_topo = rElements_topo.end(); typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin_topo, it_end_topo ) ); paux.swap(mpTopographicBinsObjectDynamic); std::cout << "Gathering Information From Topographic Domain for the first time" << std::endl; if(ovewrite_particle_data==false) { std::cout << "Not overwriting particle data (assuming correct initial conditions in calculation domain)" << std::endl; } else { std::cout << "Replacing particle information using the Topographic domain" << std::endl; const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. 
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { ResultContainerType results(max_results); ResultIteratorType result_begin = results.begin(); for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { if (results.size()!=max_results) results.resize(max_results); //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Element::Pointer pelement(*it_begin_topo); //we have no idea in which element it might be from the topographic domain, so we just set it in the first element. //Geometry<Node<3> >& geom = ielem->GetGeometry(); //array_1d<double,TDim+1> N; ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS)); int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES); //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl; for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { //KRATOS_WATCH(iii) if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop! break; PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii]; bool erase_flag= pparticle.GetEraseFlag(); if (erase_flag==false) { OverwriteParticleDataUsingTopographicDomain(pparticle,pelement,mcalculation_domain_complete_displacement,result_begin, max_results); } } } } } KRATOS_CATCH("") } //TOOL TO TRANSFER INFORMATION FROM ONE DOMAIN TO OTHER when necessary. 
to be don void PreReseedUsingTopographicDomain(const int minimum_number_of_particles, array_1d<double, 3 > domains_added_displacement) //mtopographic_model_part(topographic_model_part) { KRATOS_TRY if(mintialized_transfer_tool==false) KRATOS_THROW_ERROR(std::logic_error, "TRANSFER TOOL NOT INITIALIZED!", ""); const unsigned int max_results = 1000; std::cout << "executing transfer tool" << std::endl; ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); mcalculation_domain_added_displacement = domains_added_displacement; mcalculation_domain_complete_displacement += domains_added_displacement; ContainerType& rElements_topo = mtopographic_model_part_pointer->ElementsArray(); IteratorType it_begin_topo = rElements_topo.begin(); const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. //KRATOS_WATCH(offset) //(flag managed only by MoveParticles ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { ResultContainerType results(max_results); ResultIteratorType result_begin = results.begin(); Element::Pointer pelement(*it_begin_topo); //we have no idea in which element it might be from the topographic domain, so we just set it in the first element. 
BoundedMatrix<double, (TDim+1), 3 > pos; BoundedMatrix<double, (TDim+1) , (TDim+1) > N; unsigned int freeparticle=0; //we start with the first position in the particles array for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { if (results.size()!=max_results) results.resize(max_results); //const int & elem_id = ielem->Id(); ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS)); int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES); if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 ) { //KRATOS_WATCH("elem with little particles") Geometry< Node<3> >& geom = ielem->GetGeometry(); ComputeGaussPointPositionsForPreReseed(geom, pos, N); //double conductivity = ielem->GetProperties()[CONDUCTIVITY]; //KRATOS_WATCH(conductivity); for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element { bool keep_looking = true; while(keep_looking) { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { #pragma omp critical { if (mparticles_vector[freeparticle].GetEraseFlag()==true) { mparticles_vector[freeparticle].GetEraseFlag()=false; keep_looking=false; } } if (keep_looking==false) break; /* else if (freeparticle<(it_end_particle_model_part-1)) freeparticle++; */ else freeparticle++; //break; } else { //if (freeparticle<(it_end_particle_model_part-1)) freeparticle++; //else //break; //we finished the list and we couldnt find a free space } } PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2)); /* PFEM_Particle_Fluid & pparticle = mparticles_vector[freeparticle]; pparticle.X() = pos(j,0); pparticle.Y() = pos(j,1); pparticle.Z() = pos(j,2); */ array_1d<double,TDim+1>aux2_N; bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N); if (is_found==false) { KRATOS_WATCH(aux2_N); } 
pparticle.GetEraseFlag()=false; OverwriteParticleDataUsingTopographicDomain(pparticle,pelement,mcalculation_domain_complete_displacement,result_begin, max_results); //and we copy it to the array: mparticles_vector[freeparticle] = pparticle; element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle]; number_of_particles_in_elem++; } } } } KRATOS_CATCH("") } void CalculateVelOverElemSize() { KRATOS_TRY //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const double nodal_weight = 1.0/ (1.0 + double (TDim) ); ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); if (muse_mesh_velocity_to_convect==false) { #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Geometry<Node<3> >& geom = ielem->GetGeometry(); array_1d<double, 3 >vector_mean_velocity=ZeroVector(3); for (unsigned int i=0; i != (TDim+1) ; i++) vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY); vector_mean_velocity *= nodal_weight; const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) ); ielem->SetValue(VELOCITY_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) ); } } } else { #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Geometry<Node<3> >& geom = ielem->GetGeometry(); array_1d<double, 3 >vector_mean_velocity=ZeroVector(3); for (unsigned int 
i=0; i != (TDim+1) ; i++) vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY)-geom[i].FastGetSolutionStepValue(MESH_VELOCITY); vector_mean_velocity *= nodal_weight; const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) ); ielem->SetValue(VELOCITY_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) ); } } } KRATOS_CATCH("") } //name self explained void ResetBoundaryConditions(bool fully_reset_nodes) { KRATOS_TRY if (fully_reset_nodes) { ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; if (inode->IsFixed(VELOCITY_X)) { inode->FastGetSolutionStepValue(VELOCITY_X)=inode->GetSolutionStepValue(VELOCITY_X,1); } if (inode->IsFixed(VELOCITY_Y)) { inode->FastGetSolutionStepValue(VELOCITY_Y)=inode->GetSolutionStepValue(VELOCITY_Y,1); } if (TDim==3) if (inode->IsFixed(VELOCITY_Z)) { inode->FastGetSolutionStepValue(VELOCITY_Z)=inode->GetSolutionStepValue(VELOCITY_Z,1); } if (inode->IsFixed(PRESSURE)) inode->FastGetSolutionStepValue(PRESSURE)=inode->GetSolutionStepValue(PRESSURE,1); inode->GetSolutionStepValue(PRESSURE,1)=inode->FastGetSolutionStepValue(PRESSURE); } } } else //for fractional step only! 
{ ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; const array_1d<double, 3 > original_velocity = inode->FastGetSolutionStepValue(VELOCITY); if (inode->IsFixed(VELOCITY_X) || inode->IsFixed(VELOCITY_Y) || inode->IsFixed(VELOCITY_Z) ) { const array_1d<double, 3 > & normal = inode->FastGetSolutionStepValue(NORMAL); const double normal_scalar_sq = normal[0]*normal[0]+normal[1]*normal[1]+normal[2]*normal[2]; const array_1d<double, 3 > normal_adimensionalized = normal / sqrt(normal_scalar_sq); array_1d<double, 3 > & velocity = inode->FastGetSolutionStepValue(VELOCITY); array_1d<double, 3 > normal_velocity; for (unsigned int j=0; j!=3; j++) normal_velocity[j] = fabs(normal_adimensionalized[j])*original_velocity[j]; if (inode->IsFixed(VELOCITY_X)) { velocity[0] = original_velocity[0] - normal_velocity[0]; } if (inode->IsFixed(VELOCITY_Y)) { velocity[1] = original_velocity[1] - normal_velocity[1]; } if (TDim==3) if (inode->IsFixed(VELOCITY_Z)) { velocity[2] = original_velocity[2] - normal_velocity[2]; } } if (inode->IsFixed(PRESSURE)) inode->FastGetSolutionStepValue(PRESSURE)=inode->GetSolutionStepValue(PRESSURE,1); } } } KRATOS_CATCH("") } //setting the normal component of the velocity to zero void ResetBoundaryConditionsSlip() { KRATOS_TRY { ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, 
mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; if(inode->Is(SLIP)) { array_1d<double, 3 >& velocity = inode->FastGetSolutionStepValue(VELOCITY); const array_1d<double, 3 > & normal = inode->FastGetSolutionStepValue(NORMAL); const double normal_scalar_sq = normal[0]*normal[0]+normal[1]*normal[1]+normal[2]*normal[2]; const array_1d<double, 3 > normal_adimensionalized = normal / sqrt(normal_scalar_sq); //calculating the normal component of the velocity array_1d<double, 3 > normal_velocity; for (unsigned int j=0; j!=3; j++) normal_velocity[j] = normal_adimensionalized[j]*velocity[j]; const double dot_prod = normal_velocity[0]*velocity[0] + normal_velocity[1]*velocity[1] + normal_velocity[2]*velocity[2]; //if the dot product of velocity * normal velocity is lower than zero, then they have opposite signs and we must invert the direction: if (dot_prod<0.0) normal_velocity*= -1.0; velocity -= normal_velocity; //substracting the normal component } else if (inode->IsFixed(VELOCITY_X) && inode->IsFixed(VELOCITY_Y) ) { inode->FastGetSolutionStepValue(VELOCITY) = inode->GetSolutionStepValue(VELOCITY,1); } } } } KRATOS_CATCH("") } void CalculateDeltaVelocity() { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DELTA_VELOCITY) = 
inode->FastGetSolutionStepValue(VELOCITY) - inode->FastGetSolutionStepValue(PROJECTED_VELOCITY) ; } } KRATOS_CATCH("") } void CopyVectorVarToPreviousTimeStep(const Variable< array_1d<double, 3 > >& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable); } } KRATOS_CATCH("") } void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable); } } KRATOS_CATCH("") } //to move all the particles across the streamlines. heavy task! 
void MoveParticles(const bool discriminate_streamlines) //,const bool pressure_gradient_integrate)
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
        //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
        //since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
        //KRATOS_WATCH(offset)
        bool even_timestep;
        if (offset!=0) even_timestep=false;
        else even_timestep=true;

        // destination half of the double-sized pointer array for this step
        const int post_offset = mmaximum_number_of_particles*int(even_timestep);
        //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
        //KRATOS_WATCH(post_offset)

        double delta_t = CurrentProcessInfo[DELTA_TIME];
        const array_1d<double,3> gravity= CurrentProcessInfo[GRAVITY];
        array_1d<double,TDim+1> N;
        const unsigned int max_results = 10000;
        //double integration_distance= 2.0;

        // substepping parameters used by MoveParticle (members, set here each call)
        max_nsubsteps = 10;
        max_substep_dt=delta_t/double(max_nsubsteps);

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        //before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;

                int & number_of_particles = old_element->GetValue(NUMBER_OF_FLUID_PARTICLES);
                mnumber_of_particles_in_elems_aux(ii)=number_of_particles;
                mnumber_of_particles_in_elems(ii)=0;
                //we reset the local vectors for a faster access;
            }
        }

        bool nonzero_mesh_velocity = false;
        //seeing if we have to use the mesh_velocity or not
        // (scan stops at the first node with any nonzero MESH_VELOCITY component)
        for(ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode!=mr_model_part.NodesEnd(); inode++)
        {
            const array_1d<double, 3 > velocity = inode->FastGetSolutionStepValue(MESH_VELOCITY);
            for(unsigned int i = 0; i!=3; i++)
            {
                if (fabs(velocity[i])>1.0e-9)
                    nonzero_mesh_velocity=true;
            }
            if( nonzero_mesh_velocity==true)
                break;
        }

        if ( nonzero_mesh_velocity==true)
            muse_mesh_velocity_to_convect = true;  // if there is mesh velocity, then we have to take it into account when moving the particles
        else
            muse_mesh_velocity_to_convect = false; //otherwise, we can avoid reading the values since we know it is zero everywhere (to save time!)

        KRATOS_INFO("MoveParticleUtilityPfem2") << "Convecting particles" << std::endl;
        //We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
        const bool local_use_mesh_velocity_to_convect = muse_mesh_velocity_to_convect;

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            const array_1d<double,3> mesh_displacement = mcalculation_domain_added_displacement; //if it is a standard problem, displacements are zero and therefore nothing is added.
            ResultContainerType results(max_results);
            GlobalPointersVector< Element > elements_in_trajectory;
            elements_in_trajectory.resize(20);

            for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
            {
            //for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
            //{
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
                const int old_element_id = old_element->Id();

                // 1-based element Id indexes the 0-based global pointer-vector array
                ParticlePointerVector& old_element_particle_pointers = *mpointers_to_particle_pointers_vectors(old_element_id-1);

                if ( (results.size()) !=max_results)
                    results.resize(max_results);

                unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)

                for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
                {
                    PFEM_Particle_Fluid & pparticle = old_element_particle_pointers[offset+ii];

                    Element::Pointer pcurrent_element( *old_element.base() );
                    ResultIteratorType result_begin = results.begin();
                    bool & erase_flag=pparticle.GetEraseFlag();
                    if (erase_flag==false){
                        // convect the particle; pcurrent_element is updated to the element it lands in
                        MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results, mesh_displacement, discriminate_streamlines, local_use_mesh_velocity_to_convect);
                        //(translated) N was removed from the arguments: the particle ALWAYS starts at a node and the final local coordinates are not needed

                        const int current_element_id = pcurrent_element->Id();

                        int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
                        //int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);

                        if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
                        {
                            {
                                ParticlePointerVector& current_element_particle_pointers = *mpointers_to_particle_pointers_vectors(current_element_id-1);

                                // the counter and slot are shared between threads, so the
                                // re-check and the write must happen atomically
                                #pragma omp critical
                                {
                                    if (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
                                    {
                                        current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
                                        number_of_particles_in_current_elem++ ;
                                        if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
                                            KRATOS_WATCH("MAL");
                                    }
                                    else
                                        pparticle.GetEraseFlag()=true; //so we just delete it!
                                }
                            }
                        }
                        else
                            pparticle.GetEraseFlag()=true; //so we just delete it!
                    }
                }
            }
        }

        //now we pass info from the local vector to the elements:
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
                old_element->GetValue(NUMBER_OF_FLUID_PARTICLES) = mnumber_of_particles_in_elems(ii);
                //old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
            }
        }

        //after having changed everything we change the status of the modd_timestep flag:
        // NOTE(review): stray double semicolon below kept as-is (harmless empty statement)
        CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET] = post_offset;; //
        KRATOS_CATCH("")
    }

    /// Projects particle data (velocity and signed distance) onto the Eulerian
    /// mesh nodes by shape-function weighting ("explicit" transfer): accumulates
    /// weighted contributions per node and normalizes by the weight sum (YP).
    void TransferLagrangianToEulerian() //explicit
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        //const double delta_t =CurrentProcessInfo[DELTA_TIME];
        // NOTE(review): threshold is 0.0/(TDim+1) == 0.0 -- presumably a leftover tuning knob
        const double threshold= 0.0/(double(TDim)+1.0);

        KRATOS_INFO("MoveParticleUtilityPfem2") << "Projecting info to mesh" << std::endl;

        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
        //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
        //we must project data from the particles (lagrangian) into the eulerian mesh
        //ValuesVectorType eulerian_nodes_old_temperature;
        //int nnodes = mr_model_part.Nodes().size();
        //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

        //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
        //though we could've use a bigger buffer, to be changed later!
        //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        // step 1: zero the accumulation targets (DISTANCE, PROJECTED_VELOCITY, weight sum YP)
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(DISTANCE)=0.0;
                inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=ZeroVector(3);
                inode->FastGetSolutionStepValue(YP)=0.0;
            }
        }

        //adding contribution, loop on elements, since each element has stored the particles found inside of it
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                // per-element accumulators, scattered to the nodes under lock at the end
                array_1d<double,3*(TDim+1)> nodes_positions;
                array_1d<double,3*(TDim+1)> nodes_addedvel = ZeroVector(3*(TDim+1));
                array_1d<double,(TDim+1)> nodes_added_distance = ZeroVector((TDim+1));
                array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
                //array_1d<double,(TDim+1)> weighting_inverse_divisor;

                Geometry<Node<3> >& geom = ielem->GetGeometry();
                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    nodes_positions[i*3+0]=geom[i].X();
                    nodes_positions[i*3+1]=geom[i].Y();
                    nodes_positions[i*3+2]=geom[i].Z();
                    //weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
                }
                ///KRATOS_WATCH(ielem->Id())
                ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

                int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
                ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                        break;

                    PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

                    if (pparticle.GetEraseFlag()==false)
                    {
                        array_1d<double,3> & position = pparticle.Coordinates();
                        const array_1d<float,3>& velocity = pparticle.GetVelocity();
                        const float& particle_distance = pparticle.GetDistance(); // -1 if water, +1 if air

                        array_1d<double,TDim+1> N;
                        bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
                        if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
                        {
                            KRATOS_WATCH(N);
                            // clamp slightly-negative shape functions to a tiny positive value
                            for (int j=0 ; j!=(TDim+1); j++)
                                if (N[j]<0.0 && N[j]> -1e-5)
                                    N[j]=1e-10;
                        }

                        for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
                        {
                            //double sq_dist = 0; //these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
                            //for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
                            //double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );

                            double weight=N(j);
                            //weight=N(j)*N(j)*N(j);
                            if (weight<threshold) weight=1e-10;
                            if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);}
                            else
                            {
                                nodes_addedweights[j]+= weight;
                                //nodes_addedtemp[j] += weight * particle_temp;
                                nodes_added_distance[j] += weight*particle_distance;
                                //nodes_added_oxygen[j] += weight*particle_oxygen;
                                for (int k=0 ; k!=(TDim); k++) //x,y,(z)
                                {
                                    nodes_addedvel[j*3+k] += weight * double(velocity[k]);
                                }
                            }//
                        }
                    }
                }

                // scatter the element's accumulated sums into the shared nodal values;
                // SetLock/UnSetLock guards against concurrent writes from other threads
                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    geom[i].SetLock();
                    geom[i].FastGetSolutionStepValue(DISTANCE) +=nodes_added_distance[i];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_X) +=nodes_addedvel[3*i+0];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Y) +=nodes_addedvel[3*i+1];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Z) +=nodes_addedvel[3*i+2]; //we are updating info to the previous time step!!
                    geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
                    geom[i].UnSetLock();
                }
            }
        }

        // step 3: normalize each node by its accumulated weight (YP)
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                double sum_weights = inode->FastGetSolutionStepValue(YP);
                if (sum_weights>0.00001)
                {
                    //inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
                    double & dist = inode->FastGetSolutionStepValue(DISTANCE);
                    dist /=sum_weights; //resetting the density
                    inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=(inode->FastGetSolutionStepValue(PROJECTED_VELOCITY))/sum_weights; //resetting the velocity
                }
                else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
                {
                    // no particle contributed to this node: fall back to far-field distance
                    // and the previous-step velocity
                    inode->FastGetSolutionStepValue(DISTANCE)=3.0; //resetting the temperature
                    //inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1); //resetting the temperature
                    inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=inode->GetSolutionStepValue(VELOCITY,1);
                }

                ///finally, if there was an inlet that had a fixed position for the distance function, that has to remain unchanged:
                if (inode->IsFixed(DISTANCE))
                    inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1);
            }
        }
        KRATOS_CATCH("")
    }

    /// Semi-implicit variant of TransferLagrangianToEulerian: per element it
    /// assembles a consistent (particle-weighted) mass matrix and rhs, inverts
    /// the matrix, and adds the resulting nodal values plus a small lumped-mass
    /// contribution, before the same YP normalization pass.
    void TransferLagrangianToEulerianImp() //semi implicit
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        std::cout << "projecting info to mesh (semi implicit)" << std::endl;

        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
        //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
        //we must project data from the particles (lagrangian) into the eulerian mesh
        //ValuesVectorType eulerian_nodes_old_temperature;
        //int nnodes = mr_model_part.Nodes().size();
        //array_1d<double,(n_nodes)> eulerian_nodes_sumweights;

        //we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
        //though we could've use a bigger buffer, to be changed later!
        //after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
        ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

        // step 1: zero the accumulation targets (same as the explicit transfer)
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->FastGetSolutionStepValue(DISTANCE)=0.0;
                inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=ZeroVector(3);
                inode->FastGetSolutionStepValue(YP)=0.0;
            }
        }

        //adding contribution, loop on elements, since each element has stored the particles found inside of it
        vector<unsigned int> element_partition;
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            //creating a matrix for each of the problems.
            BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
            array_1d<double,(TDim+1)> rhs_x,rhs_y,rhs_z,rhs_d;

            array_1d<double,3*(TDim+1)> nodes_positions;
            array_1d<double,3*(TDim+1)> nodes_addedvel = ZeroVector(3*(TDim+1));
            array_1d<double,(TDim+1)> nodes_added_distance = ZeroVector((TDim+1));
            array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));

            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;

                nodes_addedvel = ZeroVector(3*(TDim+1));       //resetting vectors
                nodes_added_distance = ZeroVector((TDim+1));   //resetting vectors
                nodes_addedweights = ZeroVector((TDim+1));     //resetting vectors
                mass_matrix = ZeroMatrix(TDim+1 , TDim+1 );    //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
                //mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
                //mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
                //mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
                rhs_x = ZeroVector((TDim+1));                  //resetting vectors
                rhs_y = ZeroVector((TDim+1));                  //resetting vectors
                rhs_z = ZeroVector((TDim+1));                  //resetting vectors
                rhs_d = ZeroVector((TDim+1));                  //resetting vectors

                Geometry<Node<3> >& geom = ielem->GetGeometry();
                const double elem_volume = geom.Area();

                for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
                {
                    nodes_positions[i*3+0]=geom[i].X();
                    nodes_positions[i*3+1]=geom[i].Y();
                    nodes_positions[i*3+2]=geom[i].Z();
                }
                ///KRATOS_WATCH(ielem->Id())
                ///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());

                int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
                ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));

                for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
                {
                    if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                        break;

                    PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

                    if (pparticle.GetEraseFlag()==false)
                    {
                        array_1d<double,3> & position = pparticle.Coordinates();
                        const array_1d<float,3>& velocity = pparticle.GetVelocity();
                        const float& particle_distance = pparticle.GetDistance(); // -1 if water, +1 if air

                        array_1d<double,TDim+1> N;
                        bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
                        if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
                        {
                            KRATOS_WATCH(N);
                            for (int j=0 ; j!=(TDim+1); j++)
                                if (N[j]<0.0 && N[j]> -1e-5)
                                    N[j]=1e-10;
                        }

                        for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
                        {
                            double weight=N(j);
                            for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
                                mass_matrix(j,k) += weight*N(k);

                            rhs_x[j] += weight * double(velocity[0]);
                            rhs_y[j] += weight * double(velocity[1]);
                            rhs_z[j] += weight * double(velocity[2]);
                            rhs_d[j] += weight * double(particle_distance);

                            //adding also a part with the lumped mass matrix to reduce overshoots and undershoots
                            if(true)
                            {
                                double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contrubtion
                                nodes_addedweights[j]+= this_particle_weight;
                                nodes_added_distance[j] += this_particle_weight*particle_distance;
                                for (int k=0 ; k!=(TDim); k++) //x,y,(z)
                                {
                                    nodes_addedvel[j*3+k] += this_particle_weight * double(velocity[k]);
                                }
                            }
                        }
                    }
                }

                //now we invert the matrix
                BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
                // 3D: (TDim+1)=4 -> general inversion; 2D: (TDim+1)=3 -> dedicated 3x3 inversion
                if(TDim==3)
                    InvertMatrix( mass_matrix, inverse_mass_matrix);
                else
                    InvertMatrix3x3( mass_matrix, inverse_mass_matrix);
                //and now compute the elemental contribution to the gobal system:

                if(number_of_particles_in_elem>(TDim*3)) //otherwise it's impossible to define a correctly the gradients, therefore the results inside the element are useless.
                {
                    for (int i=0 ; i!=(TDim+1); i++)
                    {
                        for (int j=0 ; j!=(TDim+1); j++)
                        {
                            nodes_addedvel[3*i+0] += inverse_mass_matrix(i,j)*rhs_x[j]*elem_volume*(1.0/(double(1+TDim)));
                            nodes_addedvel[3*i+1] += inverse_mass_matrix(i,j)*rhs_y[j]*elem_volume*(1.0/(double(1+TDim)));
                            nodes_addedvel[3*i+2] += inverse_mass_matrix(i,j)*rhs_z[j]*elem_volume*(1.0/(double(1+TDim)));
                            nodes_added_distance[i] += inverse_mass_matrix(i,j)*rhs_d[j]*elem_volume*(1.0/(double(1+TDim)));
                        }
                    }

                    //and also to the mass matrix. LUMPED (but for the contribution of the grandient at elemental level.
                    for (int i=0 ; i!=(TDim+1); i++)
                        nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim)));
                }

                // scatter per-element sums to the shared nodal values under node locks
                for (int i=0 ; i!=(TDim+1) ; ++i)
                {
                    geom[i].SetLock();
                    geom[i].FastGetSolutionStepValue(DISTANCE) +=nodes_added_distance[i];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_X) +=nodes_addedvel[3*i+0];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Y) +=nodes_addedvel[3*i+1];
                    geom[i].FastGetSolutionStepValue(PROJECTED_VELOCITY_Z) +=nodes_addedvel[3*i+2]; //we are updating info to the previous time step!!
                    geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
                    geom[i].UnSetLock();
                }
            }
        }

        // final pass: normalize nodal sums by the accumulated weight (YP)
        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                double sum_weights = inode->FastGetSolutionStepValue(YP);
                if (sum_weights>0.00001)
                {
                    //inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
                    double & dist = inode->FastGetSolutionStepValue(DISTANCE);
                    dist /=sum_weights; //resetting the density
                    inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=(inode->FastGetSolutionStepValue(PROJECTED_VELOCITY))/sum_weights; //resetting the velocity
                }
                else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
                {
                    inode->FastGetSolutionStepValue(DISTANCE)=3.0; //resetting the temperature
                    //inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1); //resetting the temperature
                    inode->FastGetSolutionStepValue(PROJECTED_VELOCITY)=inode->GetSolutionStepValue(VELOCITY,1);
                }

                ///finally, if there was an inlet that had a fixed position for the distance function, that has to remain unchanged:
                if (inode->IsFixed(DISTANCE))
                    inode->FastGetSolutionStepValue(DISTANCE)=inode->GetSolutionStepValue(DISTANCE,1);
            }
        }
        KRATOS_CATCH("")
    }

    /// Updates particle velocities in place (no convection) by delegating each
    /// non-erased particle to AccelerateParticleUsingDeltaVelocity, looping over
    /// elements in parallel.
    void AccelerateParticlesWithoutMovingUsingDeltaVelocity()
    {
        KRATOS_TRY
        //std::cout << "updating particles" << std::endl;
        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();

        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
        //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
        //(flag managed only by MoveParticles
        //KRATOS_WATCH(offset)
        ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

        vector<unsigned int> element_partition;
        #ifdef _OPENMP
            int number_of_threads = omp_get_max_threads();
        #else
            int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
            {
                //const int & elem_id = ielem->Id();
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                Element::Pointer pelement(*ielem.base());
                Geometry<Node<3> >& geom = ielem->GetGeometry();

                ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
                int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
                //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;

                for (int iii=0;
iii<number_of_particles_in_elem ; iii++ )
                {
                    //KRATOS_WATCH(iii)
                    // NOTE(review): uses '>' here while the projection loops use '=='
                    // for the same bound -- confirm whether iii==mmaximum is intended to run.
                    if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                        break;

                    PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];

                    bool erase_flag= pparticle.GetEraseFlag();
                    if (erase_flag==false)
                    {
                        AccelerateParticleUsingDeltaVelocity(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    //**************************************************************************************************************
    //**************************************************************************************************************

    /// Appends `candidate` to `v` only if no element with the same Id() is
    /// already present (linear scan by Id).
    template< class TDataType >
    void AddUniqueWeakPointer(GlobalPointersVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
    {
        typename GlobalPointersVector< TDataType >::iterator i = v.begin();
        typename GlobalPointersVector< TDataType >::iterator endit = v.end();
        while ( i != endit && (i)->Id() != (candidate.lock())->Id())
        {
            i++;
        }
        if( i == endit )
        {
            v.push_back(candidate);
        }
    }

    //**************************************************************************************************************
    //**************************************************************************************************************

    /// Reseeds particles into elements holding fewer than
    /// minimum_number_of_particles: places new particles at pre-reseed Gauss
    /// point positions, recycles slots of erased particles from
    /// mparticles_vector, and initializes each new particle by convecting it
    /// backwards with MoveParticle_inverse_way.
    void PreReseed(int minimum_number_of_particles)
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
        const int max_results = 1000;

        //tools for the paralelization
        unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
        vector<unsigned int> elem_partition;
        int number_of_rows=mr_model_part.Elements().size();
        elem_partition.resize(number_of_threads + 1);
        int elem_partition_size = number_of_rows / number_of_threads;
        elem_partition[0] = 0;
        elem_partition[number_of_threads] = number_of_rows;
        //KRATOS_WATCH(elem_partition_size);
        for (unsigned int i = 1; i < number_of_threads; i++)
            elem_partition[i] = elem_partition[i - 1] + elem_partition_size;

        const bool local_use_mesh_velocity_to_convect = muse_mesh_velocity_to_convect;

        #pragma omp parallel firstprivate(elem_partition)
        {
            ResultContainerType results(max_results);
            int k = OpenMPUtils::ThisThread();
            ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() +  elem_partition[k];
            ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
            //ModelPart::NodesContainerType local_list=aux[k];
            //PointerVectorSet<PFEM_Particle_Fluid, IndexedObject> & list=aux[k];
            //KRATOS_WATCH(k);
            BoundedMatrix<double, (TDim+1), 3 > pos;
            BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
            unsigned int freeparticle=0; //we start with the first position in the particles array

            //int local_id=1;
            for (ModelPart::ElementsContainerType::iterator ielem = it_begin; ielem != it_end; ielem++)
            {
                results.resize(max_results);
                //const int & elem_id = ielem->Id();
                ParticlePointerVector& element_particle_pointers =  (ielem->GetValue(FLUID_PARTICLE_POINTERS));
                int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
                if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
                {
                    //KRATOS_WATCH("elem with little particles")
                    Geometry< Node<3> >& geom = ielem->GetGeometry();
                    ComputeGaussPointPositionsForPreReseed(geom, pos, N);
                    //double conductivity = ielem->GetProperties()[CONDUCTIVITY];
                    //KRATOS_WATCH(conductivity);
                    for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
                    {
                        // find a recyclable (erased) slot in the global particle array;
                        // the erase flag is re-checked inside the critical section since
                        // other threads scan the same array
                        bool keep_looking = true;
                        while(keep_looking)
                        {
                            if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                            {
                                #pragma omp critical
                                {
                                    if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                    {
                                        mparticles_vector[freeparticle].GetEraseFlag()=false;
                                        keep_looking=false;
                                    }
                                }
                                if (keep_looking==false)
                                    break;
                                else
                                    freeparticle++;
                            }
                            else
                            {
                                freeparticle++;
                            }
                        }

                        PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2));

                        array_1d<double,TDim+1>aux2_N;
                        bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                        if (is_found==false)
                        {
                            KRATOS_WATCH(aux2_N);
                        }

                        pparticle.GetEraseFlag()=false;

                        ResultIteratorType result_begin = results.begin();
                        Element::Pointer pelement( *ielem.base() );
                        // initialize the new particle's data by tracing backwards in time
                        MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results, local_use_mesh_velocity_to_convect);

                        //and we copy it to the array:
                        mparticles_vector[freeparticle] =  pparticle;

                        element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                        pparticle.GetEraseFlag()=false;

                        number_of_particles_in_elem++;
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    //**************************************************************************************************************
    //**************************************************************************************************************

    /// Reseeds under-populated elements after convection ("pooyan's way"):
    /// seeds 3+2*TDim particles per deficient element at post-reseed Gauss
    /// points, deciding each particle's water/air flag from the nodal DISTANCE
    /// field (with a mass_correction_factor bias), and interpolates its velocity
    /// from the element nodes.
    /// NOTE(review): this function continues past the end of this chunk.
    void PostReseed(int minimum_number_of_particles, double mass_correction_factor ) //pooyan's way
    {
        KRATOS_TRY

        ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];

        // clamp the correction factor to [-0.5, 0.5]
        if (mass_correction_factor>0.5) mass_correction_factor=0.5;
        if (mass_correction_factor<-0.5) mass_correction_factor=-0.5;
        //mass_correction_factor=0.0;
        //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
        //const double delta_t = CurrentProcessInfo[DELTA_TIME];
        //array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY];
        //const int max_results = 1000;
        const double threshold = mass_correction_factor*0.5;

        //TOOLS FOR THE PARALELIZATION
        //int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
        unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
        //KRATOS_WATCH(number_of_threads);
        vector<unsigned int> elem_partition;
        int number_of_rows=mr_model_part.Elements().size();
        //KRATOS_WATCH(number_of_threads);
        //KRATOS_THROW_ERROR(std::logic_error, "Add  ----NODAL_H---- variable!!!!!! ERROR", "");
        elem_partition.resize(number_of_threads + 1);
        int elem_partition_size = number_of_rows / number_of_threads;
        elem_partition[0] = 0;
        elem_partition[number_of_threads] = number_of_rows;
        //KRATOS_WATCH(elem_partition_size);
        for (unsigned int i = 1; i < number_of_threads; i++)
            elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
        //typedef Node < 3 > PointType;
        //std::vector<ModelPart::NodesContainerType> aux;// aux;
        //aux.resize(number_of_threads);

        //ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
        //ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();

        #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
        {
            unsigned int reused_particles=0;

            unsigned int freeparticle = 0; //we start by the first position;

            int k = OpenMPUtils::ThisThread();
            ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() +  elem_partition[k];
            ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;

            BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
            BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;

            array_1d<double, 3 > vel_complete, vel_without_air_nodes;
            double sum_Ns_without_air_nodes;
            double mesh_distance;

            array_1d<double, (3+2*TDim) > distances;
            array_1d<int, (3+2*TDim) > positions;
            array_1d<bool, (3+2*TDim) > is_water_particle; //for both

            unsigned int number_of_reseeded_particles;
            //unsigned int number_of_water_reseeded_particles;

            //array_1d<double, 3 > nodes_distances;

            //int local_id=1;
            for (ModelPart::ElementsContainerType::iterator ielem = it_begin; ielem != it_end; ielem++)
            {
                //results.resize(max_results);

                int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
                ParticlePointerVector& element_particle_pointers =  (ielem->GetValue(FLUID_PARTICLE_POINTERS));
                Geometry< Node<3> >& geom = ielem->GetGeometry();
                if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(minimum_number_of_particles) ) )
                {
                    //bool reseed_more=false;
                    number_of_reseeded_particles=0;

                    //reseed_more=true;
                    number_of_reseeded_particles= 3+2*TDim;
                    ComputeGaussPointPositionsForPostReseed(geom, pos, N);

                    distances = ZeroVector(3+2*TDim);

                    // classify the element: does it touch water (DISTANCE<0) and/or air nodes?
                    bool has_water_node=false;
                    bool has_air_node=false;
                    double mean_element_distance = 0.0;

                    for (unsigned int j = 0; j < (TDim+1); j++)
                    {
                        mean_element_distance += (1.0/double(TDim+1))*(geom[j].FastGetSolutionStepValue(DISTANCE));
                        if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0)
                            has_water_node=true;
                        else
                            has_air_node=true;
                    }

                    //first we check the particle distance according to the nodal values
                    for (unsigned int j = 0; j < number_of_reseeded_particles; j++) //first we order particles
                    {
                        positions[j]=j+1; //just creating a vector from 1 to 7 or whathever our lenght is (7 for 2d, 9 for 3d)
                        for (unsigned int l = 0; l < (TDim+1); l++)
                        {
                            distances[j] +=  N(j, l) * geom[l].FastGetSolutionStepValue(DISTANCE);
                        }
                    }

                    if ( (has_air_node && has_water_node) ) //for slit elements we use the distance function
                    {
                        for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                        {
                            if (distances[j]>threshold)
                                is_water_particle[j]=false;
                            else
                                is_water_particle[j]=true;
                        }
                    }
                    else if (has_air_node) // pure air element: reseed a water fraction based on the mean distance
                    {
                        double water_fraction = 0.5 - 0.5*(mean_element_distance);
                        if (water_fraction>0.9 && mass_correction_factor<0.0) //to avoid seeding air particles when we are in a pure water element
                            mass_correction_factor = 0.0;
                        unsigned int number_of_water_reseeded_particles = double(number_of_reseeded_particles)*(1.01+mass_correction_factor*1.0)*water_fraction;

                        BubbleSort(distances, positions, number_of_reseeded_particles);
                        //ok. now we have the particles ordered from the "watermost" to "airmost". therefore we will fill the water particles and later the air ones using that order
                        for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                        {
                            int array_position = positions[j]-1;
                            if (array_position>3 && number_of_reseeded_particles==4)
                            {
                                KRATOS_WATCH("error in reseeding")
                            }
                            if ( (j+1) <= number_of_water_reseeded_particles ) //means it is a water particle
                                is_water_particle[array_position]=true;
                            else
                                is_water_particle[array_position]=false;
                        }
                    }
                    else //only water particles
                    {
                        for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                            is_water_particle[j]=true;
                    }

                    bool fix_distance = false;
                    unsigned int node_with_fixed_distance = 0;
                    for (unsigned int j = 0; j < (TDim+1) ; j++) //we go over the 3/4 nodes:
                    {
                        if ((geom[j].IsFixed(DISTANCE)))
                        {
                            fix_distance = true;
                            node_with_fixed_distance = j;
                        }
                    }

                    // so now if the 3 were fixed, we assign the sign of the first node to all the particles:
                    // NOTE(review): fix_distance is set if ANY node is fixed, and the LAST fixed
                    // node found wins -- confirm this matches the comment's intent.
                    if (fix_distance)
                    {
                        bool is_water_for_all_particles=true;
                        if ((geom[node_with_fixed_distance].FastGetSolutionStepValue(DISTANCE))>0.0)
                            is_water_for_all_particles=false;
                        for (unsigned int j = 0; j < number_of_reseeded_particles ; j++) //first we order particles
                            is_water_particle[j]=is_water_for_all_particles;
                    }

                    for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                    {
                        //now we have to find an empty space ( a particle that was about to be deleted) in the particles model part. once found. there will be our renewed particle:
                        bool keep_looking = true;
                        while(keep_looking)
                        {
                            if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                            {
                                #pragma omp critical
                                {
                                    if (mparticles_vector[freeparticle].GetEraseFlag()==true)
                                    {
                                        mparticles_vector[freeparticle].GetEraseFlag()=false;
                                        keep_looking=false;
                                    }
                                }
                                if (keep_looking==false)
                                    break;
                                else
                                    freeparticle++;
                            }
                            else
                            {
                                freeparticle++;
                            }
                        }

                        PFEM_Particle_Fluid pparticle(pos(j,0),pos(j,1),pos(j,2));

                        array_1d<float, 3 > & vel = pparticle.GetVelocity();
                        float& distance= pparticle.GetDistance();

                        array_1d<double,TDim+1>aux_N;
                        bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                        if (is_found==false)
                        {
                            KRATOS_WATCH(aux_N);
                            KRATOS_WATCH(j)
                            KRATOS_WATCH(ielem->Id())
                        }

                        noalias(vel_complete)=ZeroVector(3);
                        noalias(vel_without_air_nodes)=ZeroVector(3);
                        sum_Ns_without_air_nodes=0.0;

                        noalias(vel) = ZeroVector(3);
                        distance=0.0;
                        mesh_distance = 0.0;
                        //oxygen = 0.0;

                        // interpolate nodal velocity; also accumulate a water-only average
                        // for particles seeded on the water side
                        for (unsigned int l = 0; l < (TDim+1); l++)
                        {
                            noalias(vel_complete) += N(j, l) * geom[l].FastGetSolutionStepValue(VELOCITY);
                            mesh_distance += N(j,l) * geom[l].FastGetSolutionStepValue(DISTANCE);
                            if ((geom[l].FastGetSolutionStepValue(DISTANCE))<0.0)
                            {
                                sum_Ns_without_air_nodes+=N(j, l);
                                noalias(vel_without_air_nodes) += N(j, l) * geom[l].FastGetSolutionStepValue(VELOCITY);
                            }
                        }

                        ///COMMENT TO GET A CONTINOUS DISTANCE FUNCTION FIELD
                        if (is_water_particle[j])
                        {
                            distance=-1.0;
                        }
                        else
                        {
                            //if (mesh_distance<2.0)
                                distance=1.0;
                            //else
                            //	distance=3.0;
                        }

                        if (distance<0.0 && sum_Ns_without_air_nodes>0.01)
                            vel = vel_without_air_nodes / sum_Ns_without_air_nodes ;
                        else
                            vel = vel_complete;

                        pparticle.GetEraseFlag()=false;

                        mparticles_vector[freeparticle]=pparticle;
                        element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
                        number_of_particles_in_elem++;

                        // NOTE(review): keep_looking is always false here (the search loop only
                        // exits after claiming a slot), so this THROW branch looks unreachable.
                        if (keep_looking)
                        {
                            KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
                        }
                        else
                        {
                            reused_particles++;
                        }
                    }
                }
            }
        }
KRATOS_CATCH("") } void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor ) { KRATOS_TRY //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list if(mparticle_printing_tool_initialized==false) { mfilter_factor=input_filter_factor; if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0) KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", ""); lagrangian_model_part.AddNodalSolutionStepVariable(VELOCITY); lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT); lagrangian_model_part.AddNodalSolutionStepVariable(DISTANCE); for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++) { Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mparticle_printing_tool_initialized=true; } //resetting data of the unused particles const double inactive_particle_position= -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin(); for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DISTANCE) = 0.0; inode->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } int counter=0; //ModelPart::NodesContainerType::iterator it_begin = lagrangian_model_part.NodesBegin(); for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++) { 
PFEM_Particle_Fluid& pparticle =mparticles_vector[i]; if(pparticle.GetEraseFlag()==false && i%mfilter_factor==0) { ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(DISTANCE) = pparticle.GetDistance(); inode->FastGetSolutionStepValue(VELOCITY) = pparticle.GetVelocity(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } KRATOS_CATCH("") } void ExecuteParticlesPritingToolForDroppletsOnly( ModelPart& lagrangian_model_part, int input_filter_factor ) { KRATOS_TRY //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list const int first_particle_id=1000000; if(mparticle_printing_tool_initialized==false) { mfilter_factor=input_filter_factor; if(lagrangian_model_part.NodesBegin()-lagrangian_model_part.NodesEnd()>0) KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", ""); lagrangian_model_part.AddNodalSolutionStepVariable(VELOCITY); lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT); lagrangian_model_part.AddNodalSolutionStepVariable(DISTANCE); for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++) { Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+first_particle_id+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! 
//pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize()); pnode->SetBufferSize(1); } mparticle_printing_tool_initialized=true; } //resetting data of the unused particles const double inactive_particle_position= -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin(); for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(DISTANCE) = 0.0; inode->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } const int max_number_of_printed_particles=lagrangian_model_part.Nodes().size(); ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET]; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. 
//(flag managed only by MoveParticles //KRATOS_WATCH(offset) ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); int counter=0; for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; Element::Pointer pelement(*ielem.base()); Geometry<Node<3> >& geom = ielem->GetGeometry(); //double mean_elem_dist=0.0; bool pure_air_elem=true; for(unsigned int j=0; j<(TDim+1); j++) { if (geom[j].FastGetSolutionStepValue(DISTANCE)<0.0) pure_air_elem=false; //mean_elem_dist += geom[j].FastGetSolutionStepValue(DISTANCE); } //if (mean_elem_dist>0.0) //only air elements if (pure_air_elem==true) { ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS)); int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES); //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl; for (int iii=0; iii<number_of_particles_in_elem ; iii++ ) { //KRATOS_WATCH(iii) if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop! break; PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii]; bool erase_flag= pparticle.GetEraseFlag(); if (erase_flag==false && pparticle.GetDistance()<0.0) { ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(DISTANCE) = pparticle.GetDistance(); inode->FastGetSolutionStepValue(VELOCITY) = pparticle.GetVelocity(); inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates(); counter++; } } } if (counter>(max_number_of_printed_particles-30)) //we are approaching the end of the model part. so we stop before it's too late break; } KRATOS_CATCH("") } void AssignNodalVelocityUsingInletConditions(const double inlet_vel) { KRATOS_TRY //first we are going to delete all the velocities! 
    ModelPart::ConditionsContainerType::iterator iconditionbegin = mr_model_part.ConditionsBegin();
    vector<unsigned int> condition_partition;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Conditions().size(), condition_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=condition_partition[kkk]; ii<condition_partition[kkk+1]; ii++)
        {
            ModelPart::ConditionsContainerType::iterator icondition = iconditionbegin+ii;
            if ( icondition->GetValue(IS_INLET) > 0.5 )
            {
                Geometry<Node<3> >& geom = icondition->GetGeometry();
                array_1d<double,3> normal = ZeroVector(3);
                this->CalculateNormal(geom,normal);
                // velocity points inward (against the outward normal), magnitude inlet_vel
                const double normal_lenght = sqrt(normal[0]*normal[0] + normal[1]*normal[1] + normal[2]*normal[2]);
                const array_1d<double,3> velocity = - inlet_vel/normal_lenght * normal;
                // NOTE(review): loops over TDim nodes — presumably the condition has TDim nodes
                // (edge in 2D, triangle in 3D); confirm against the condition type used.
                for (unsigned int l = 0; l < (TDim); l++)
                {
                    geom[l].SetLock();   // nodes may be shared between conditions of different threads
                    geom[l].FastGetSolutionStepValue(VELOCITY) = velocity;
                    geom[l].UnSetLock();
                }
            }
        }
    }

    KRATOS_CATCH("")
}

/// Rotates particle velocities and (non-fixed) nodal velocities around the Z axis
/// by rotations[2] radians. Rotations around X/Y are not implemented.
void RotateParticlesAndDomainVelocities(array_1d<double, 3 > rotations)
{
    KRATOS_TRY

    if(fabs(rotations[0])>0.000000001 || fabs(rotations[1])>0.000000001)
        KRATOS_THROW_ERROR(std::invalid_argument,"ROTATIONS ONLY IMPLEMENTED AROUND Z AXIS! (xy plane) ","");

    const double cosinus_theta = cos(rotations[2]);
    const double sinus_theta = sin(rotations[2]);
    //std::cout << "updating particles" << std::endl;
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    const int offset = CurrentProcessInfo[WATER_PARTICLE_POINTERS_OFFSET];
    //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
    //(flag managed only by MoveParticles
    //KRATOS_WATCH(offset)
    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
    vector<unsigned int> element_partition;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

    // first: rotate the velocity carried by every active particle
    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            ParticlePointerVector& element_particle_pointers = (ielem->GetValue(FLUID_PARTICLE_POINTERS));
            int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_FLUID_PARTICLES);
            //std::cout << "elem " << ii << " with " << (unsigned int)number_of_particles_in_elem << " particles" << std::endl;
            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                //KRATOS_WATCH(iii)
                if (iii>mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                    break;
                PFEM_Particle_Fluid & pparticle = element_particle_pointers[offset+iii];
                bool erase_flag= pparticle.GetEraseFlag();
                if (erase_flag==false)
                {
                    // standard 2D rotation of the (x,y) velocity components
                    array_1d<float, 3 > & vel = pparticle.GetVelocity();
                    const float vel_x = vel[0];
                    const float vel_y = vel[1];
                    vel[0] = cosinus_theta*vel_x + sinus_theta*vel_y;
                    vel[1] = cosinus_theta*vel_y - sinus_theta*vel_x;
                }
            }
        }
    }

    // second: rotate the mesh (nodal) velocities, skipping fixed DOFs
    ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
    vector<unsigned int> node_partition;
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
            if (inode->IsFixed(VELOCITY_X)==false)
            {
                array_1d<double, 3 > & vel = inode->FastGetSolutionStepValue(VELOCITY);
                const double vel_x = vel[0];
                const double vel_y = vel[1];
                vel[0] = cosinus_theta*vel_x + sinus_theta*vel_y;
                vel[1] = cosinus_theta*vel_y - sinus_theta*vel_x;
            }
        }
    }

    KRATOS_CATCH("")
}

protected:

private:

/// Verifies that all required nodal solution-step variables are present
/// in the model part; throws std::invalid_argument otherwise.
void Check()
{
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(DISTANCE) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing DISTANCE variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(VELOCITY) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing VELOCITY variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(PRESSURE) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing PRESSURE variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(PROJECTED_VELOCITY) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing PROJECTED_VELOCITY variable on solution step data","");
    if(mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_VELOCITY) == false)
        KRATOS_THROW_ERROR(std::invalid_argument,"missing DELTA_VELOCITY variable on solution step data","");
if(mr_model_part.NodesBegin()->SolutionStepsDataHas(MESH_VELOCITY) == false) KRATOS_THROW_ERROR(std::invalid_argument,"missing MESH_VELOCITY variable on solution step data",""); if(mr_model_part.NodesBegin()->SolutionStepsDataHas(YP) == false) KRATOS_THROW_ERROR(std::invalid_argument,"missing YP variable on solution step data",""); if(mr_model_part.NodesBegin()->SolutionStepsDataHas(NORMAL) == false) KRATOS_THROW_ERROR(std::invalid_argument,"missing NORMAL variable on solution step data",""); } ///this function moves a particle according to the "velocity" given ///by "rVariable". The movement is performed in nsubsteps, during a total time ///of Dt void MoveParticle( PFEM_Particle_Fluid & pparticle, Element::Pointer & pelement, GlobalPointersVector< Element >& elements_in_trajectory, unsigned int & number_of_elements_in_trajectory, ResultIteratorType result_begin, const unsigned int MaxNumberOfResults, const array_1d<double,3> mesh_displacement, const bool discriminate_streamlines, const bool use_mesh_velocity_to_convect) { ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY]; unsigned int nsubsteps; double substep_dt; bool KEEP_INTEGRATING=false; bool is_found; //bool have_air_node; //bool have_water_node; array_1d<double,3> vel; array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3); array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. 
position = pparticle.Coordinates(); //initial coordinates const float particle_distance = pparticle.GetDistance(); array_1d<float,3> particle_velocity = pparticle.GetVelocity(); //double distance=0.0; array_1d<double,3> last_useful_vel; double sum_Ns_without_other_phase_nodes; //double pressure=0.0; ///***** //bool flying_water_particle=true; //if a water particle does not find a water element in its whole path, then we add the gravity*dt double only_integral = 0.0 ; is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { KEEP_INTEGRATING=true; Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in vel=ZeroVector(3); vel_without_other_phase_nodes = ZeroVector(3); sum_Ns_without_other_phase_nodes=0.0; //distance=0.0; if (particle_distance<0.0 && discriminate_streamlines==true) { for(unsigned int j=0; j<(TDim+1); j++) { if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0) //ok. useful info! 
{ sum_Ns_without_other_phase_nodes += N[j]; noalias(vel_without_other_phase_nodes) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; if (use_mesh_velocity_to_convect) noalias(vel_without_other_phase_nodes) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j]; } noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; if (use_mesh_velocity_to_convect) noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j]; } if (sum_Ns_without_other_phase_nodes>0.01) { vel = vel_without_other_phase_nodes / sum_Ns_without_other_phase_nodes; //flying_water_particle=false; } else { vel = particle_velocity; if (use_mesh_velocity_to_convect) { for(unsigned int j=0; j<(TDim+1); j++) noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j]; } } } else // air particle or we are not following streamlines { for(unsigned int j=0; j<(TDim+1); j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; if (use_mesh_velocity_to_convect) noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j]; } //flying_water_particle=false; } //calculating substep to get +- courant(substep) = 0.1 nsubsteps = 10.0 * (delta_t * pelement->GetValue(VELOCITY_OVER_ELEM_SIZE)); if (nsubsteps<1) nsubsteps=1; substep_dt = delta_t / double(nsubsteps); only_integral = 1.0;// weight;//*double(nsubsteps); position += vel*substep_dt;//weight; ///***** last_useful_vel=vel; ///***** //DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY ////////////////////////////////////////////////////////////////////////////////////////////////////// unsigned int check_from_element_number=0; for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle. 
{ if (KEEP_INTEGRATING==true) { is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in sum_Ns_without_other_phase_nodes=0.0; if (particle_distance<0.0 && discriminate_streamlines==true) { vel_without_other_phase_nodes = ZeroVector(3); for(unsigned int j=0; j<TDim+1; j++) { if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0) //ok. useful info! { sum_Ns_without_other_phase_nodes += N[j]; noalias(vel_without_other_phase_nodes) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; if (use_mesh_velocity_to_convect) noalias(vel_without_other_phase_nodes) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j]; } noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; if (use_mesh_velocity_to_convect) noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j]; } //if (have_water_node) //if (distance<0.0) if (sum_Ns_without_other_phase_nodes>0.01) { vel = vel_without_other_phase_nodes / sum_Ns_without_other_phase_nodes; //flying_water_particle=false; } else { particle_velocity += substep_dt * gravity; vel = particle_velocity; if (use_mesh_velocity_to_convect) { for(unsigned int j=0; j<(TDim+1); j++) noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j]; } } } else //air particle or we are not discriminating streamlines { vel_without_other_phase_nodes = ZeroVector(3); vel = ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; if (use_mesh_velocity_to_convect) noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j]; } //flying_water_particle=false; } only_integral += 1.0; //values saved for the current time step position+=vel*substep_dt;//weight; } else { KEEP_INTEGRATING=false; break; } } else break; } } //if there's 
a mesh velocity, we add it at the end in a single step: position-=mesh_displacement; if (KEEP_INTEGRATING==false) (pparticle.GetEraseFlag()=true); else is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement) if (is_found==false) ( pparticle.GetEraseFlag()=true); pparticle.Coordinates() = position; } void AccelerateParticleUsingDeltaVelocity( PFEM_Particle_Fluid & pparticle, Element::Pointer & pelement, Geometry< Node<3> >& geom) { array_1d<double,TDim+1> N; ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const double delta_t = CurrentProcessInfo[DELTA_TIME]; array_1d<double,3> gravity = CurrentProcessInfo[GRAVITY]; //we start with the first position, then it will enter the loop. array_1d<double,3> coords = pparticle.Coordinates(); float & particle_distance = pparticle.GetDistance(); //double distance=0.0; array_1d<double,3> delta_velocity = ZeroVector(3); array_1d<double,3> delta_velocity_without_air = ZeroVector(3); array_1d<double,3> delta_velocity_without_water = ZeroVector(3); double sum_Ns_without_water_nodes = 0.0; double sum_Ns_without_air_nodes = 0.0; bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if(is_found == false) { KRATOS_WATCH(N) for (int j=0 ; j!=(TDim+1); j++) if (N[j]<0.0 ) N[j]=1e-10; } if (particle_distance>0.0) //no problem. air { for(unsigned int j=0; j<(TDim+1); j++) { //just for air if ((geom[j].FastGetSolutionStepValue(DISTANCE))>0.0) { noalias(delta_velocity_without_water) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j]; sum_Ns_without_air_nodes += N[j]; } //both air and water noalias(delta_velocity) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j]; } if (sum_Ns_without_water_nodes>0.01) { //delta_velocity = delta_velocity_without_water/sum_Ns_without_water_nodes ; //commented = using all the velocities always! 
} //else we use the complete field } else //water particle { for(unsigned int j=0; j<(TDim+1); j++) { if ((geom[j].FastGetSolutionStepValue(DISTANCE))<0.0) { noalias(delta_velocity_without_air) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j]; sum_Ns_without_air_nodes += N[j]; } noalias(delta_velocity) += geom[j].FastGetSolutionStepValue(DELTA_VELOCITY)*N[j]; } if (sum_Ns_without_air_nodes>0.01) { delta_velocity = delta_velocity_without_air/sum_Ns_without_air_nodes ; } else { if (mDENSITY_WATER>(10.0*mDENSITY_AIR)) { delta_velocity=gravity*(1.0-mDENSITY_AIR/mDENSITY_WATER)*delta_t; } } } pparticle.GetVelocity() = pparticle.GetVelocity() + delta_velocity; } void MoveParticle_inverse_way( PFEM_Particle_Fluid & pparticle, Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO! ResultIteratorType result_begin, const unsigned int MaxNumberOfResults, const bool use_mesh_velocity_to_convect) { ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; unsigned int nsubsteps; double substep_dt; bool KEEP_INTEGRATING=false; bool is_found; array_1d<double,3> vel; array_1d<double,3> particle_vel; array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. 
    position = pparticle.Coordinates(); // + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
    float & distance = pparticle.GetDistance();
    double only_integral = 0.0 ;

    is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
    //good, now we know where this point is:
    if(is_found == true)
    {
        KEEP_INTEGRATING=true;
        Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
        vel=ZeroVector(3);
        particle_vel=ZeroVector(3);
        distance=0.0;
        // interpolate distance, the velocity to store on the particle, and the
        // convection velocity (optionally relative to the moving mesh)
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            distance += geom[j].FastGetSolutionStepValue(DISTANCE)*N(j);
            noalias(particle_vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
            if (use_mesh_velocity_to_convect)
                noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
        }
        //calculating substep to get +- courant(substep) = 1/4
        nsubsteps = 10.0 * (delta_t * pelement->GetValue(VELOCITY_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1;
        substep_dt = delta_t / double(nsubsteps);

        only_integral = 1.0;// weight;//*double(nsubsteps);
        position -= vel*substep_dt;//weight;   // NOTE: minus — we integrate backwards

        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (KEEP_INTEGRATING==true) {
                is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults);
                //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
                    vel=ZeroVector(3);
                    particle_vel=ZeroVector(3);
                    distance=0.0;
                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        noalias(particle_vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j] ;
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j] ;
                        distance += geom[j].FastGetSolutionStepValue(DISTANCE)*N(j);
                        if (use_mesh_velocity_to_convect)
                            noalias(vel) -= geom[j].FastGetSolutionStepValue(MESH_VELOCITY)*N[j];
                    }
                    only_integral += 1.0;//weight ; //values saved for the current time step
                    position-=vel*substep_dt;//weight;
                }
                else KEEP_INTEGRATING=false;
            }
        }

        ///COMMENT TO GET A A CONTINOUS DISTANCE FUNCTION FIELD!!!!!
        // collapse the interpolated distance to a sharp +-1 two-phase flag
        if(distance>0.0)
        {
            //if(distance<2.0)
            distance=1.0;
            //else
            //	distance=3.0;
        }
        else
            distance=-1.0;
        pparticle.GetVelocity()=particle_vel;
    }
    //else {KRATOS_WATCH(position); }
}

/// Overwrites particle data using an auxiliary "topographic" domain: particles
/// falling inside the topographic mesh become solid/water (distance -1), the
/// rest become air (distance +1); velocity is zeroed in both cases.
void OverwriteParticleDataUsingTopographicDomain( PFEM_Particle_Fluid & pparticle,
                                                  Element::Pointer & pelement,
                                                  array_1d<double,3> domains_offset,
                                                  ResultIteratorType result_begin,
                                                  const unsigned int MaxNumberOfResults)
{
    array_1d<double,TDim+1> N;
    //we start with the first position, then it will enter the loop.
    // shift the particle position into the topographic domain's coordinate system
    array_1d<double,3> coords = pparticle.Coordinates()+domains_offset;
    float & particle_distance = pparticle.GetDistance();
    bool is_found = FindNodeOnTopographicMesh(coords, N ,pelement,result_begin,MaxNumberOfResults);
    //good, now we know where this point is:
    if (is_found) //it is part of the solid topographic domain
    {
        particle_distance= -1.0;
    }
    else //it is outside the topographic domain, therefore it is air or whatever it means
    {
        particle_distance= 1.0;
    }
    pparticle.GetVelocity() = ZeroVector(3);
}

///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the postion within the element
///if "false" is devolved the element is not found
/// Search order: (1) the element the particle was last in, (2) its neighbours,
/// (3) the spatial bins container.
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true) //that was easy!
    {
        return true;
    }

    //to begin with we check the neighbour elements; it is a bit more expensive
    GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */
    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement = neighb_elems[i].shared_from_this();
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
    if(results_found>0){
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                return true;
            }
        }
    }
    //if nothing worked, then:
    //not found case
    return false;
}

// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     GlobalPointersVector< Element >& elements_in_trajectory,
                     unsigned int &
                     number_of_elements_in_trajectory,
                     unsigned int & check_from_element_number,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true)
    {
        return true; //that was easy!
    }

    //if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
    for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
    {
        Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
        if (is_found_2)
        {
            pelement = elements_in_trajectory[i].shared_from_this();
            N=aux_N;
            check_from_element_number = i+1 ; //now i element matches pelement, so to avoid cheching twice the same element we send the counter to the following element.
            return true;
        }
    }

    //now we check the neighbour elements:
    GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    //the first we check is the one that has negative shape function, because it means it went outside in this direction:
    //commented, it is not faster than simply checking all the neighbours (branching)
    /*
    unsigned int checked_element=0;
    for (unsigned int i=0;i!=(TDim+1);i++)
    {
        if (N[i]<0.0)
        {
            checked_element=i;
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
            if (is_found_2)
            {
                pelement=Element::Pointer(((neighb_elems(i))));
                N=aux_N;
                return true;
            }
            break;
        }
    }
    */
    //we check all the neighbour elements
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement = neighb_elems[i].shared_from_this();
            // record the element so later particles from the same seed element can
            // reuse the trajectory (capped at 20 elements)
            if (number_of_elements_in_trajectory<20)
            {
                elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                number_of_elements_in_trajectory++;
                check_from_element_number = number_of_elements_in_trajectory;  //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
            }
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins
    //ask to the container for the list of candidate elements
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                if (number_of_elements_in_trajectory<20)
                {
                    elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
                    number_of_elements_in_trajectory++;
                    check_from_element_number = number_of_elements_in_trajectory;  //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
                }
                return true;
            }
        }
    }

    //not found case
    return false;
}

///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the postion within the element
///if "false" is devolved the element is not found
/// Same search strategy as FindNodeOnMesh but on the topographic bins container.
bool FindNodeOnTopographicMesh( array_1d<double,3>& position,
                                array_1d<double,TDim+1>& N,
                                Element::Pointer & pelement,
                                ResultIteratorType result_begin,
                                const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;

    const array_1d<double,3>& coords = position;
    array_1d<double,TDim+1> aux_N;
    //before using the bin to search for possible elements we check first the last element in which the particle was.
//ModelPart::ElementsContainerType::iterator i = mr_model_part.ElementsBegin()+last_element; Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry(); bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N); if(is_found_1 == true) { //pelement = (*(i)); return true; } //to begin with we check the neighbour elements: GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS); for (unsigned int i=0;i!=(neighb_elems.size());i++) { Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry(); bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if (is_found_2) { pelement = neighb_elems[i].shared_from_this(); return true; } } //ask to the container for the list of candidate elements SizeType results_found = mpTopographicBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults ); //KRATOS_WATCH(results_found) if(results_found>0){ //loop over the candidate elements and check if the particle falls within for(SizeType i = 0; i< results_found; i++) { Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry(); //find local position bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N); if(is_found == true) { pelement=Element::Pointer((*(result_begin+i))); return true; } } } //not found case return false; } //*************************************** //*************************************** inline bool CalculatePosition(Geometry<Node < 3 > >&geom, const double xc, const double yc, const double zc, array_1d<double, 3 > & N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double area = CalculateVol(x0, y0, x1, y1, x2, y2); double inv_area = 0.0; if (area == 0.0) { KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", ""); } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] 
= CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; //KRATOS_WATCH(N); if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //////////// //using the pre loaded nodal coordinates inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions, const double xc, const double yc, const double zc, array_1d<double, 3 > & N ) { const double& x0 = nodes_positions[0]; const double& y0 = nodes_positions[1]; const double& x1 = nodes_positions[3]; const double& y1 = nodes_positions[4]; const double& x2 = nodes_positions[6]; const double& y2 = nodes_positions[7]; double area = CalculateVol(x0, y0, x1, y1, x2, y2); double inv_area = 0.0; if (area == 0.0) { KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", ""); } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; //KRATOS_WATCH(N); if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //*************************************** //*************************************** inline bool CalculatePosition(Geometry<Node < 3 > >&geom, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); double inv_vol = 0.0; if (vol < 0.000000000000000000000000000001) { 
KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", ""); } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } /////////////////// //using the pre loaded nodal coordinates inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { const double& x0 = nodes_positions[0]; const double& y0 = nodes_positions[1]; const double& z0 = nodes_positions[2]; const double& x1 = nodes_positions[3]; const double& y1 = nodes_positions[4]; const double& z1 = nodes_positions[5]; const double& x2 = nodes_positions[6]; const double& y2 = nodes_positions[7]; const double& z2 = nodes_positions[8]; const double& x3 = nodes_positions[9]; const double& y3 = nodes_positions[10]; const double& z3 = nodes_positions[11]; double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); double inv_vol = 0.0; if (vol < 0.000000000000000000000000000001) { KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", ""); } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] 
<= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } inline double CalculateVol(const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0)); } //*************************************** //*************************************** inline double CalculateVol(const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666667; } void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) { double one_third = 1.0 / 3.0; double one_sixt = 0.15; //1.0 / 6.0; double two_third = 0.7; //2.0 * one_third; N(0, 0) = one_sixt; N(0, 1) = one_sixt; N(0, 2) = two_third; N(1, 0) = two_third; N(1, 1) = one_sixt; N(1, 2) = one_sixt; N(2, 0) = one_sixt; N(2, 1) = two_third; N(2, 2) = one_sixt; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; //first pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X(); pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y(); pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z(); //second pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X(); pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y(); pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z(); 
//third pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X(); pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y(); pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); } void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d { double one_third = 1.0 / 3.0; double one_eight = 0.12; //1.0 / 6.0; double three_quarters = 0.76; //2.0 * one_third; N(0, 0) = one_eight; N(0, 1) = one_eight; N(0, 2) = three_quarters; N(1, 0) = three_quarters; N(1, 1) = one_eight; N(1, 2) = one_eight; N(2, 0) = one_eight; N(2, 1) = three_quarters; N(2, 2) = one_eight; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; N(4, 0) = one_eight; N(4, 1) = 0.44; N(4, 2) = 0.44; N(5, 0) = 0.44; N(5, 1) = one_eight; N(5, 2) = 0.44; N(6, 0) = 0.44; N(6, 1) = 0.44; N(6, 2) = one_eight; //first pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X(); pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y(); pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z(); //second pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X(); pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y(); pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z(); //third pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X(); pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + 
one_eight * geom[2].Y(); pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); //fifth pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X(); pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y(); pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z(); //sixth pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X(); pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y(); pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z(); //seventh pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X(); pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y(); pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z(); } void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D { double one_quarter = 0.25; double small_fraction = 0.1; //1.0 / 6.0; double big_fraction = 0.7; //2.0 * one_third; double mid_fraction = 0.3; //2.0 * one_third; N(0, 0) = big_fraction; N(0, 1) = small_fraction; N(0, 2) = small_fraction; N(0, 3) = small_fraction; N(1, 0) = small_fraction; N(1, 1) = big_fraction; N(1, 2) = small_fraction; N(1, 3) = small_fraction; N(2, 0) = small_fraction; N(2, 1) = small_fraction; N(2, 2) = big_fraction; N(2, 3) = small_fraction; N(3, 0) = small_fraction; N(3, 1) = small_fraction; N(3, 2) = small_fraction; N(3, 3) = big_fraction; N(4, 0) = one_quarter; N(4, 1) = one_quarter; N(4, 2) = one_quarter; N(4, 3) = one_quarter; N(5, 0) = small_fraction; N(5, 1) = mid_fraction; 
N(5, 2) = mid_fraction; N(5, 3) = mid_fraction; N(6, 0) = mid_fraction; N(6, 1) = small_fraction; N(6, 2) = mid_fraction; N(6, 3) = mid_fraction; N(7, 0) = mid_fraction; N(7, 1) = mid_fraction; N(7, 2) = small_fraction; N(7, 3) = mid_fraction; N(8, 0) = mid_fraction; N(8, 1) = mid_fraction; N(8, 2) = mid_fraction; N(8, 3) = small_fraction; pos=ZeroMatrix(9,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=9; j++) //going through the 9 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D { N(0, 0) = 0.5; N(0, 1) = 0.25; N(0, 2) = 0.25; N(1, 0) = 0.25; N(1, 1) = 0.5; N(1, 2) = 0.25; N(2, 0) = 0.25; N(2, 1) = 0.25; N(2, 2) = 0.5; //first pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X(); pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y(); pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z(); //second pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X(); pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * geom[1].Y() + 0.25 * geom[2].Y(); pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z(); //third pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X(); pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y(); pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z(); } void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D { //creating 4 particles, each will be closer to a node and equidistant to the other nodes N(0, 0) = 0.4; N(0, 1) = 0.2; N(0, 2) = 0.2; N(0, 3) = 0.2; N(1, 0) = 0.2; N(1, 1) = 0.4; N(1, 2) = 0.2; N(1, 3) = 0.2; N(2, 0) = 0.2; N(2, 1) = 
0.2; N(2, 2) = 0.4; N(2, 3) = 0.2; N(3, 0) = 0.2; N(3, 1) = 0.2; N(3, 2) = 0.2; N(3, 3) = 0.4; pos=ZeroMatrix(4,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=4; j++) //going through the 4 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N) { //std::cout << "NEW ELEMENT" << std::endl; unsigned int counter=0; for (unsigned int i=0; i!=9;i++) { for (unsigned int j=0; j!=(9-i);j++) { N(counter,0)=0.05+double(i)*0.1; N(counter,1)=0.05+double(j)*0.1; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D { //std::cout << "NEW ELEMENT" << std::endl; unsigned int counter=0; for (unsigned int i=0; i!=5;i++) { for (unsigned int j=0; j!=(5-i);j++) { N(counter,0)=0.05+double(i)*0.2; N(counter,1)=0.05+double(j)*0.2; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << 
N(counter,2) << " " << std::endl; counter++; } } } void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D { //std::cout << "NEW ELEMENT" << std::endl; //double total; double fraction_increment; unsigned int counter=0; for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles { //std::cout << "inside i" << i << std::endl; for (unsigned int j=0; j!=(4-i);j++) { //std::cout << "inside j" << j << std::endl; for (unsigned int k=0; k!=(4-i-j);k++) { //std::cout << "inside k" << k << std::endl; N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1) //total = 1.0 - N(counter,0); fraction_increment = 0.27; // N(counter,1)=fraction_increment * (0.175 + double(j)); N(counter,2)=fraction_increment * (0.175 + double(k)); N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z(); //std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl; counter++; } } } } // Bubble Sort Function for Descending Order void BubbleSort(array_1d<double,7> &distances , array_1d<int,7 > &positions, unsigned int & arrange_number) { int i, j; bool flag = true; // set flag to 1 to start first pass double temp; // holding variable int temp_position; int numLength = arrange_number; 
for(i = 1; (i <= numLength) && flag; i++) { flag = false; for (j=0; j < (numLength -1); j++) { if (distances[j+1] < distances[j]) // descending order simply changes to > { temp = distances[j]; // swap elements distances[j] = distances[j+1]; distances[j+1] = temp; temp_position = positions[j]; //swap positions positions[j] = positions[j+1]; positions[j+1] = temp_position; flag = true; // indicates that a swap occurred. } } } return; //arrays are passed to functions by address; nothing is returned } void BubbleSort(array_1d<double,9> &distances , array_1d<int,9 > &positions, unsigned int & arrange_number) { int i, j; bool flag = true; // set flag to 1 to start first pass double temp; // holding variable int temp_position; int numLength = arrange_number; for(i = 1; (i <= numLength) && flag; i++) { flag = false; for (j=0; j < (numLength -1); j++) { if (distances[j+1] < distances[j]) // descending order simply changes to > { temp = distances[j]; // swap elements distances[j] = distances[j+1]; distances[j+1] = temp; temp_position = positions[j]; //swap positions positions[j] = positions[j+1]; positions[j+1] = temp_position; flag = true; // indicates that a swap occurred. 
} } } return; //arrays are passed to functions by address; nothing is returned } template<class T> bool InvertMatrix(const T& input, T& inverse) { typedef permutation_matrix<std::size_t> pmatrix; // create a working copy of the input T A(input); // create a permutation matrix for the LU-factorization pmatrix pm(A.size1()); // perform LU-factorization int res = lu_factorize(A, pm); if (res != 0) return false; // create identity matrix of "inverse" inverse.assign(identity_matrix<double> (A.size1())); // backsubstitute to get the inverse lu_substitute(A, pm, inverse); return true; } bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result) { double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2)) -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0)) +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0)); double invdet = 1/determinant; result(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet; result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet; result(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet; result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet; result(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet; result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet; result(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet; result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet; result(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet; return true; } ModelPart& mr_model_part; ModelPart* mtopographic_model_part_pointer; array_1d<double, 3 > mcalculation_domain_complete_displacement; array_1d<double, 3 > mcalculation_domain_added_displacement; bool mintialized_transfer_tool; bool muse_mesh_velocity_to_convect; int m_nparticles; int mnelems; double mDENSITY_WATER; double mDENSITY_AIR; //vector<double> mareas_vector; UNUSED SO COMMENTED int max_nsubsteps; double max_substep_dt; int mmaximum_number_of_particles; std::vector< PFEM_Particle_Fluid > mparticles_vector; //Point<3> int mlast_elem_id; bool modd_timestep; bool mparticle_printing_tool_initialized; unsigned int 
mfilter_factor; unsigned int mlast_node_id; //ModelPart& mr_particle_model_part; vector<int> mnumber_of_particles_in_elems; vector<int> mnumber_of_particles_in_elems_aux; vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; typename BinsObjectDynamic<Configure>::Pointer mpTopographicBinsObjectDynamic; void CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An ); }; template<> void MoveParticleUtilityPFEM2<2>::CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An ) { array_1d<double,2> v1; v1[0] = pGeometry[1].X() - pGeometry[0].X(); v1[1] = pGeometry[1].Y() - pGeometry[0].Y(); An[0] = -v1[1]; An[1] = v1[0]; An[2] = 0.0; //now checking orientation using the normal: const unsigned int NumNodes = 2; array_1d<double,3> nodal_normal = ZeroVector(3); for (unsigned int iNode = 0; iNode < NumNodes; ++iNode) nodal_normal += pGeometry[iNode].FastGetSolutionStepValue(NORMAL); double dot_prod = nodal_normal[0]*An[0] + nodal_normal[1]*An[1]; if (dot_prod<0.0) { //std::cout << "inverting the normal" << std::endl; An *= -1.0; // inverting the direction of the normal!!! 
} } template<> void MoveParticleUtilityPFEM2<3>::CalculateNormal(Geometry<Node<3> >& pGeometry, array_1d<double,3>& An ) { array_1d<double,3> v1,v2; v1[0] = pGeometry[1].X() - pGeometry[0].X(); v1[1] = pGeometry[1].Y() - pGeometry[0].Y(); v1[2] = pGeometry[1].Z() - pGeometry[0].Z(); v2[0] = pGeometry[2].X() - pGeometry[0].X(); v2[1] = pGeometry[2].Y() - pGeometry[0].Y(); v2[2] = pGeometry[2].Z() - pGeometry[0].Z(); MathUtils<double>::CrossProduct(An,v1,v2); An *= 0.5; //now checking orientation using the normal: const unsigned int NumNodes = 3; array_1d<double,3> nodal_normal = ZeroVector(3); for (unsigned int iNode = 0; iNode < NumNodes; ++iNode) nodal_normal += pGeometry[iNode].FastGetSolutionStepValue(NORMAL); double dot_prod = nodal_normal[0]*An[0] + nodal_normal[1]*An[1] + nodal_normal[2]*An[2]; if (dot_prod<0.0) { //std::cout << "inverting the normal!!" << std::endl; An *= -1.0; // inverting the direction of the normal!!! } } } // namespace Kratos. #endif // KRATOS_MOVE_PART_UTILITY_DIFF2_INCLUDED defined
round_robin_clusters.c
#define N 10
#define NB_CLUSTERS 4
#define NB_PES 16
/* FIX: result operands must be parenthesized too, otherwise e.g.
 * MIN(a, b) + 1 or MIN(a, b = c) expands with the wrong precedence. */
#define MIN(x, y) ((x) < (y) ? (x) : (y))

#include <stdio.h>

/* Return a globally unique, monotonically increasing ticket number.
 * In case this is called from several threads. Avoid a flush, anyway */
int get_ticket() {
  static int t = 0;
  int ticket;
#pragma omp atomic capture
  ticket = t++;
  return ticket;
}

/* FIX: the parameter was declared K&R-style as `get_data(t)` with an
 * implicit int type — invalid since C99 and a hard error in C23. */
int get_data(int t) {
  //sleep(t&1);
  /* In a real application, get a radar signal time slice for example */
  return t*2;
}

int compute(int d, int cluster, int pe) {
  /* In a real application, do a computation on the data */
  return d*cluster + pe;
}

int main() {
  /* Launch all the threads to control the clusters only once */
#pragma omp parallel num_threads(NB_CLUSTERS)
  {
    for (int i = 0; i < N; i++) {
      /* Execute 1 iteration per thread and there will be some ordered
         statement. It is useless to wait at the end of the iterations, but it
         looks like a nowait here break the ordered. Compiler bug? */
#pragma omp for schedule(static, 1) ordered
      for (int cluster = 0; cluster < NB_CLUSTERS; cluster++) {
        /* Get an ID in the order of the sequential iteration.
           Remove the ordered if it is not needed. */
        int t;
#pragma omp ordered
        {
          t = get_ticket();
        }
        int d = get_data(t);
        /* FIX: must start at 0 — the OpenMP reduction(+:r) combines the
         * threads' partial sums into the ORIGINAL value of r, which was
         * previously left uninitialized (indeterminate result). */
        int r = 0;
#pragma omp parallel for num_threads(NB_PES) reduction(+:r)
        for (int pe = 0; pe < NB_PES; pe++)
#pragma smecy map(STHORM, cluster, pe)
          r += compute(d, cluster, pe);
        /* Produce the result in order.
           Remove the ordered if it is not needed. */
#pragma omp ordered
        {
          printf("Cluster %d produced %d for ticket %d\n", cluster, r, t);
        }
      }
    }
  }
}
rawSHA512_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 2010 by Solar Designer * based on rawMD4_fmt.c code, with trivial changes by groszek. * * Rewritten Spring 2013, JimF. SSE code added and released with the following terms: * No copyright is claimed, and the software is hereby placed in the public domain. * In case this attempt to disclaim copyright and place the software in the public * domain is deemed null and void, then the software is Copyright (c) 2011 JimF * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_raw0_SHA512; #elif FMT_REGISTERS_H john_register_one(&fmt_raw0_SHA512); #else #include <stdint.h> #include "arch.h" #include "sha2.h" #include "params.h" #include "common.h" #include "johnswap.h" #include "formats.h" #include "rawSHA512_common.h" //#undef SIMD_COEF_64 //#undef SIMD_PARA_SHA512 /* * Only effective for SIMD. * Undef to disable reversing steps for benchmarking. 
*/
#define REVERSE_STEPS

#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif

#include "simd-intrinsics.h"
#include "memdbg.h"

#define FORMAT_LABEL "Raw-SHA512"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif

/* SIMD key buffers hold one SHA-512 block, so plaintext is capped lower there. */
#ifdef SIMD_COEF_64
#define PLAINTEXT_LENGTH 111
#else
#define PLAINTEXT_LENGTH 125
#endif

/* Only the first 64 bits of the digest are kept as "binary" for compares. */
#define BINARY_SIZE 8
#define SALT_SIZE 0
#define SALT_ALIGN 1

#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#ifdef SIMD_COEF_64
/* Map (byte offset i, candidate index) into the interleaved, big-endian SIMD
 * key buffer: lanes are interleaved per 64-bit word, bytes reversed in-word. */
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
static uint64_t (*saved_key);   /* interleaved SIMD input blocks */
static uint64_t (*crypt_out);   /* interleaved SIMD digest output */
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint64_t (*crypt_out)[DIGEST_SIZE / sizeof(uint64_t)];
#endif

/* Allocate per-candidate buffers; with OpenMP, scale candidate counts by
 * thread count (times OMP_SCALE for max) so each thread gets work. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_64
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
#else
	/* SIMD buffers need vector alignment. */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt * SHA_BUF_SIZ, sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt * 8, sizeof(*crypt_out), MEM_ALIGN_SIMD);
#endif
}

/* Release everything init() allocated. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
#ifndef SIMD_COEF_64
	MEM_FREE(saved_len);
#endif
}

/* Decode the hex ciphertext (after the tag) into the raw digest.
 * Returns a pointer to a static buffer — not reentrant by design here.
 * For SIMD the digest is byte-swapped (and partially unwound when
 * REVERSE_STEPS is on) to match what crypt_all() produces. */
static void *get_binary(char *ciphertext)
{
	static uint64_t *outw;
	unsigned char *out;
	char *p;
	int i;

	if (!outw)
		outw = mem_calloc_tiny(DIGEST_SIZE, BINARY_ALIGN);

	out = (unsigned char*)outw;

	p = ciphertext + TAG_LENGTH;
	for (i = 0; i < DIGEST_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

#ifdef SIMD_COEF_64
	alter_endianity_to_BE64(out, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha512_reverse(outw);
#endif
#endif
	return out;
}

#ifdef SIMD_COEF_64
/* First 64-bit word of candidate `index` in the interleaved SIMD output. */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif

/* Hash-table bucketing of the stored binary (first 64 bits of digest). */
static int binary_hash_0(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_6; }

/* Store a candidate password. In SIMD mode the key is copied 8 bytes at a
 * time into the interleaved big-endian block buffer, the 0x80 SHA-512
 * padding byte is appended in the same store, trailing garbage from a
 * previous (longer) key is zeroed, and the bit length goes in word 15. */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_64
#if ARCH_ALLOWS_UNALIGNED
	const uint64_t *wkey = (uint64_t*)key;
#else
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint64_t));
	const uint64_t *wkey = is_aligned(key, sizeof(uint64_t)) ? (uint64_t*)key : (uint64_t*)strcpy(buf_aligned, key);
#endif
	uint64_t *keybuffer = &((uint64_t*)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
	uint64_t *keybuf_word = keybuffer;
	unsigned int len;
	uint64_t temp;

	len = 0;
	/* Scan 8 bytes at a time; each branch handles the NUL landing in a
	 * particular byte of the word, merging the 0x80 pad into that word. */
	while((unsigned char)(temp = *wkey++)) {
		if (!(temp & 0xff00))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xff) | (0x80 << 8));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffff) | (0x80 << 16));
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffff) | (0x80ULL << 24));
			len+=3;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000ULL))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffffff) | (0x80ULL << 32));
			len+=4;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000000000ULL))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffULL) | (0x80ULL << 40));
			len+=5;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000000000ULL))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffULL) | (0x80ULL << 48));
			len+=6;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000000000ULL))
		{
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffffULL) | (0x80ULL << 56));
			len+=7;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP64(temp);
		len += 8;
		keybuf_word += SIMD_COEF_64;
	}
	/* Key length was a multiple of 8: pad byte starts a fresh word. */
	*keybuf_word = 0x8000000000000000ULL;

key_cleaning:
	/* Zero any leftover words from a previous, longer key in this lane. */
	keybuf_word += SIMD_COEF_64;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_64;
	}
	/* SHA-512 message length in bits, stored in block word 15. */
	keybuffer[15*SIMD_COEF_64] = len << 3;
#else
	int len = strlen(key);
	saved_len[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_len[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
#endif
}

/* Reconstruct the stored plaintext. SIMD mode reads the length back out of
 * block word 15 and de-interleaves byte by byte via GETPOS.
 * Returns a static buffer in SIMD mode. */
static char *get_key(int index)
{
#ifdef SIMD_COEF_64
	unsigned i;
	uint64_t s;
	static char out[PLAINTEXT_LENGTH + 1];
	unsigned char *wucp = (unsigned char*)saved_key;

	s = ((uint64_t*)saved_key)[15*SIMD_COEF_64 + (index&(SIMD_COEF_64-1)) + index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] >> 3;
	for (i = 0; i < (unsigned)s; i++)
		out[i] = wucp[ GETPOS(i, index) ];
	out[i] = 0;
	return (char*) out;
#else
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
#endif
}

#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif

/* Hash all queued candidates. Each OpenMP iteration processes one SIMD
 * batch of MAX_KEYS_PER_CRYPT keys.
 * NOTE(review): without _OPENMP the `for` disappears with the #ifdef and the
 * block runs once with index 0 — presumably count never exceeds one batch
 * in that configuration; verify against the format driver. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_64
		SIMDSHA512body(&saved_key[index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64], &crypt_out[index/SIMD_COEF_64*8*SIMD_COEF_64], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
		SHA512_CTX ctx;
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, saved_key[index], saved_len[index]);
		SHA512_Final((unsigned char *)crypt_out[index], &ctx);
#endif
	}
	return count;
}

/* Any candidate's first digest word equal to the target's? */
static int cmp_all(void *binary, int count)
{
	unsigned int index;

	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((uint64_t*)binary)[0] == crypt_out[HASH_IDX])
#else
		if ( ((uint64_t*)binary)[0] == crypt_out[index][0] )
#endif
			return 1;
	return 0;
}

/* Compare only the first 64 bits; cmp_exact() resolves collisions. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
	return ((uint64_t*)binary)[0] == crypt_out[HASH_IDX];
#else
	return *(uint64_t*)binary == crypt_out[index][0];
#endif
}

/* Full confirmation: recompute SHA-512 of the plaintext from scratch and
 * compare the entire digest (the SIMD fast path only checked 64 bits).
 * The local crypt_out array deliberately shadows the file-scope pointer. */
static int cmp_exact(char *source, int index)
{
	uint64_t *binary = get_binary(source);
	char *key = get_key(index);
	SHA512_CTX ctx;
	uint64_t crypt_out[DIGEST_SIZE / sizeof(uint64_t)];

	SHA512_Init(&ctx);
	SHA512_Update(&ctx, key, strlen(key));
	SHA512_Final((unsigned char*)crypt_out, &ctx);

#ifdef SIMD_COEF_64
	/* Mirror the transforms get_binary()/crypt_all() applied, so the
	 * memcmp below compares like with like. */
	alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha512_reverse(crypt_out);
#endif
#endif
	return !memcmp(binary, crypt_out, DIGEST_SIZE);
}

/*
 * The '0_' makes sure this format registers before others,
 * if ambiguous. Do not copy it for other formats.
 */
struct fmt_main fmt_raw0_SHA512 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"SHA512 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG, XSHA512_FORMAT_TAG, NSLDAP_FORMAT_TAG },
		sha512_common_tests_rawsha512_111
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		sha512_common_valid,
		sha512_common_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
DRB006-indirectaccess2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Two pointers have a distance of 12 (p1 - p2 = 12).
They are used as base addresses for indirect array accesses using an index
set (another array).

The index set has two indices with a distance of 12:
  indexSet[5] - indexSet[0] = 533 - 521 = 12
So there is a loop-carried dependence (e.g. between loop iterations with
index values of 0 and 5).

We use the default loop scheduling (static even) in OpenMP. It is possible
that two dependent iterations will be scheduled within the same chunk to the
same thread, in which case no race manifests at runtime.

When N is 180, the two iterations with i=0 and i=5 have loop-carried
dependences. For static even scheduling, we must have at least 36 threads
(180/36 = 5 iterations per chunk) so iterations 0 and 5 are scheduled to two
different threads.

Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180

/* Index set driving the indirect accesses. Because xa2 = xa1 + 12, any pair
 * of entries differing by exactly 12 (e.g. 521 and 533 below) makes
 * xa1[big] and xa2[small] alias the same element of `base`. */
int indexSet[N] = {
521, 523, 525, 527, 529, 533, // 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};

/* Benchmark driver: exercises the (intentional) data race between
 * xa1[idx] and xa2[idx] when two iterations touch aliasing elements.
 * Do NOT "fix" the race — the point of this file is to contain one. */
int main (int argc, char* argv[])
{
  /* Largest index used is xa2[2013] == base[2013+12], hence the +12+1. */
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0) {
    printf ("Error in malloc(). Aborting ...\n");
    return 1;
  }

  /* Two views of the same allocation, offset by 12 elements. */
  double * xa1 = base;
  double * xa2 = xa1 + 12;
  int i;

  // initialize segments touched by indexSet
  for (i =521; i<= 2025; ++i) {
    base[i]=0.5*i;
  }

#pragma omp parallel for schedule(dynamic) // default static even scheduling may not trigger data race!
  for (i =0; i< N; ++i) {
    int idx = indexSet[i];
    xa1[idx]+= 1.0;   /* races with xa2[idx-12] on aliasing iterations */
    xa2[idx]+= 3.0;
  }

  /* NOTE(review): label says "x1" but prints xa1 — cosmetic typo only. */
  printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
  free (base);
  return 0;
}
progressive_kd_tree_index.h
#ifndef panene_progressive_kd_tree_index_h
#define panene_progressive_kd_tree_index_h

#include <vector>
#include <algorithm>
#include <random>
#include <cstring>
#include <cstdio>
#include <iostream>
#include <queue>
#include <cassert>
#include <map>

#include <kd_tree_index.h>

#ifdef BENCHMARK
#include <util/timer.h>
#define BENCH(x) x
#else
#define BENCH(x) ((void)0)
#endif

namespace panene {

// Phase of the incremental rebuild state machine.
enum UpdateStatus {
  NoUpdate,         // no rebuild in progress
  BuildingTree,     // splitting the shuffled id set into a new tree
  InsertingPoints   // appending points that arrived after the rebuild began
};

// Fraction of each run() budget spent on insertion vs. index maintenance.
struct TreeWeight {
  float addPointWeight;
  float updateIndexWeight;

  TreeWeight(float addPointWeight_, float updateIndexWeight_) : addPointWeight(addPointWeight_), updateIndexWeight(updateIndexWeight_) {
  }
};

// Bookkeeping returned by run(): requested ops, performed ops, and
// (when BENCHMARK is defined) elapsed times for each phase.
struct UpdateResult2 {
  size_t numPointsInserted;
  size_t addPointOps;
  size_t updateIndexOps;
  size_t addPointResult;
  size_t updateIndexResult;
  double addPointElapsed;
  double updateIndexElapsed;

  UpdateResult2() = default;

  UpdateResult2(
    size_t addPointOps_, size_t updateIndexOps_,
    size_t addPointResult_, size_t updateIndexResult_,
    size_t numPointsInserted_,
    double addPointElapsed_, double updateIndexElapsed_) :
    numPointsInserted(numPointsInserted_),
    addPointOps(addPointOps_), updateIndexOps(updateIndexOps_),
    addPointResult(addPointResult_), updateIndexResult(updateIndexResult_),
    addPointElapsed(addPointElapsed_), updateIndexElapsed(updateIndexElapsed_)
  {
  }

  friend std::ostream& operator<<(std::ostream& os, const UpdateResult2& obj) {
    os << "UpdateResult2(addPointOps: " << obj.addPointResult << " / " << obj.addPointOps << ", "
       << "updateIndexOps: " << obj.updateIndexResult << " / " << obj.updateIndexOps
       << ", numPointsInserted: " << obj.numPointsInserted << ")";
    return os;
  }
};

/*
 * A KD-tree index that interleaves point insertion, querying, and an
 * incremental background rebuild: when accumulated query cost (queryLoss)
 * exceeds a threshold, a fresh tree is built a few operations at a time and
 * eventually swapped in for the most imbalanced existing tree.
 *
 * NOTE(review): std::random_shuffle (used below) was deprecated in C++14 and
 * removed in C++17 — this header will not build with -std=c++17; migrate to
 * std::shuffle with an explicit engine when touching this code.
 */
template <typename DataSource>
class ProgressiveKDTreeIndex : public KDTreeIndex<DataSource> {
  USE_KDTREE_INDEX_SYMBOLS

  typedef DataSource DataSourceT;

public:
  ProgressiveKDTreeIndex(DataSource *dataSource_, IndexParams indexParams_,
                         TreeWeight weight_ = TreeWeight(0.3, 0.7),
                         const float reconstructionWeight_ = .25f) :
    KDTreeIndex<DataSource>(dataSource_, indexParams_, Distance()),
    weight(weight_), reconstructionWeight(reconstructionWeight_) {
  }

  /*
   * Take up to `ops` new points from the data source into every tree.
   * Returns the number of points actually added (clamped by the source
   * size), except on the very first call, which bulk-builds and returns
   * `ops` as-is. If a rebuild is in InsertingPoints phase, the ongoing tree
   * is fed the new points too so it does not fall behind.
   */
  size_t addPoints(size_t ops) {
    size_t oldSize = size;
    size += ops;
    if (size > dataSource->size())
      size = dataSource->size();

    if (oldSize == 0) { // for the first time, build the index as we did in the non-progressive version.
      buildIndex();
      return ops;
    }
    else {
      for (size_t i = oldSize; i < size; ++i) {
        for (size_t j = 0; j < numTrees; ++j) {
          trees[j]->size++;
          addPointToTree(trees[j], trees[j]->root, i, 0);
        }
      }
      if (updateStatus == UpdateStatus::InsertingPoints) {
        for (size_t i = oldSize; i < size && sizeAtUpdate < size; ++i) {
          ongoingTree->size++;
          addPointToTree(ongoingTree, ongoingTree->root, sizeAtUpdate++, 0);
        }
      }
      return size - oldSize;
    }
  }

  /*
   * Start an incremental rebuild: snapshot the current ids, shuffle them,
   * and seed the BFS split queue with the root covering all of them.
   */
  void beginUpdate() {
    updateStatus = UpdateStatus::BuildingTree;
    sizeAtUpdate = size;
    ids.resize(sizeAtUpdate);

    for (size_t i = 0; i < sizeAtUpdate; ++i) ids[i] = int(i);
    std::random_shuffle(ids.begin(), ids.end());

    ongoingTree = new KDTree<NodePtr>(dataSource->capacity());
    ongoingTree->root = new(pool) Node(ongoingTree);

    std::queue<NodeSplit> empty;  // std::queue has no clear(); swap-with-empty idiom
    queue = empty;

    queue.push(NodeSplit(ongoingTree->root, &ids[0], sizeAtUpdate, 1));
    ongoingTree->size = sizeAtUpdate;
  }

  /*
   * Advance the rebuild by up to `ops` operations (-1 means unbounded).
   * BuildingTree phase: pop pending splits and create child nodes.
   * InsertingPoints phase: append points that arrived after beginUpdate().
   * When the ongoing tree catches up to `size`, it replaces the most
   * imbalanced existing tree. Returns the number of operations performed.
   */
  size_t update(int ops) {
    int updatedCount = 0;

    while ((ops == -1 || updatedCount < ops) && !queue.empty()) {
      NodeSplit nodeSplit = queue.front();
      queue.pop();

#if DEBUG
      std::cerr << "updatedCount " << updatedCount << std::endl;
#endif

      NodePtr node = nodeSplit.node;
      IDType *begin = nodeSplit.begin;
      int count = nodeSplit.count;
      int depth = nodeSplit.depth;

#if DEBUG
      std::cerr << begin << " " << count << std::endl;
#endif

      // At this point, the two children of nodeSplit are nullptr
      if (count == 1) {
        node->child1 = node->child2 = NULL;    /* Mark as leaf node. */
        node->id = *begin;    /* Store index of this vec. */ // TODO id of vec
        ongoingTree->setInsertionLog(node->id, 1, depth);
      }
      else {
        int idx;
        int cutfeat;
        DistanceType cutval;
        meanSplit(begin, count, idx, cutfeat, cutval);

#if DEBUG
        std::cerr << "cut index: " << idx << " cut count: " << count << std::endl;
#endif

        node->divfeat = cutfeat;
        node->divval = cutval;
        node->child1 = new(pool) Node(ongoingTree);
        node->child2 = new(pool) Node(ongoingTree);

        queue.push(NodeSplit(node->child1, begin, idx, depth + 1));
        queue.push(NodeSplit(node->child2, begin + idx, count - idx, depth + 1));
      }
      updatedCount += 1; // count; // std::min(1, count / 2);
    }

    if (updateStatus == UpdateStatus::BuildingTree && queue.empty()) {
      updateStatus = UpdateStatus::InsertingPoints;
    }

    if (updateStatus == UpdateStatus::InsertingPoints) {
      if (ongoingTree->size < size) {
        // insert points from sizeAtUpdate to size
        while (ongoingTree->size < size && (ops == -1 || updatedCount < ops)) {
          ongoingTree->size++;
          addPointToTree(ongoingTree, ongoingTree->root, sizeAtUpdate, 0);
          sizeAtUpdate++;
          updatedCount++;
        }
      }

      if (ongoingTree->size >= size) {
        // finished creating a new tree
        ongoingTree->cost = ongoingTree->computeCost();

        size_t victimId = 0;
        float maxImbalance = trees[0]->computeImbalance();

        // find the most unbalanced one
        for (size_t i = 1; i < numTrees; ++i) {
          float imbalance = trees[i]->computeImbalance();
          if (maxImbalance < imbalance) {
            maxImbalance = imbalance;
            victimId = i;
          }
        }

        // get the victim
        auto victim = trees[victimId];

        // replace the victim with the newly created tree
        // NOTE(review): nodes were placement-new'ed into `pool`; confirm
        // KDTree's destructor is safe to run via `delete` here.
        delete victim;
        trees[victimId] = ongoingTree;

        // reset the sizeAtUpdate
        sizeAtUpdate = 0;
        updateStatus = UpdateStatus::NoUpdate;
      }
    }

    return updatedCount;
  }

  /*
   * Spend a total budget of `ops` operations, split between insertion and
   * rebuild according to `weight`. When insertion has nothing left to do,
   * its share is permanently folded into the rebuild share.
   */
  UpdateResult2 run(size_t ops) {
    size_t addPointOps = 0, updateIndexOps = 0;
    size_t addPointResult = 0, updateIndexResult = 0;
    double addPointElapsed = 0, updateIndexElapsed = 0;

    if (updateStatus != UpdateStatus::NoUpdate) {
      addPointOps = (size_t)(ops * weight.addPointWeight);
      updateIndexOps = (size_t)(ops * weight.updateIndexWeight);
    }
    else {
      addPointOps = ops;
    }

    BENCH(Timer timer);
    BENCH(timer.begin());

    if (addPointOps > 0) {
      addPointResult = addPoints(addPointOps);
    }
    if (addPointResult == 0) { // if we added all points, put all operations to update index
      weight.updateIndexWeight += weight.addPointWeight;
      weight.addPointWeight = 0;
      updateIndexOps = ops;
      addPointOps = 0;
    }
    size_t numPointsInserted = size;

    BENCH(addPointElapsed = timer.end());

    if (updateStatus != NoUpdate) {
      BENCH(timer.begin());
      updateIndexResult = update(updateIndexOps);
      BENCH(updateIndexElapsed = timer.end());
    }

    return UpdateResult2(
      addPointOps, updateIndexOps,
      addPointResult, updateIndexResult,
      numPointsInserted,
      addPointElapsed, updateIndexElapsed);
  }

  // Trigger a rebuild once accumulated query cost outweighs the estimated
  // reconstruction cost (~ size * log2(size), scaled by reconstructionWeight).
  void checkBeginUpdate() {
    if (updateStatus == UpdateStatus::NoUpdate) {
      float updateCost = (float)std::log2(size) * size;

      if (queryLoss > updateCost * reconstructionWeight) {
        beginUpdate();
        queryLoss = 0;
      }
    }
  }

  // k-NN query for a point already in the data source, identified by id.
  // Adds (actual cost - ideal cost) to queryLoss and may kick off a rebuild.
  void knnSearch(
    const IDType &qid,
    ResultSet<IDType, DistanceType> &resultSet,
    size_t knn,
    const SearchParams& params) {

    std::vector<ElementType> vector(dim);
    dataSource->get(qid, vector);

    float costSum = findNeighbors(vector, resultSet, params);
    size_t ideal = std::log2(size);  // ideal per-tree cost for a balanced tree
    queryLoss += costSum - numTrees * ideal;
    checkBeginUpdate();
  }

  // this needs to be improved
  // Batch id-based query: materialize the vectors, then delegate.
  void knnSearch(
    const std::vector<IDType> qids,
    std::vector<ResultSet<IDType, DistanceType>> &resultSets,
    size_t knn,
    const SearchParams& params) {

    std::vector<std::vector<ElementType>> vectors(qids.size());
    for (size_t i = 0; i < qids.size(); ++i) {
      vectors[i].resize(dim);
      dataSource->get(qids[i], vectors[i]);
    }

    knnSearch(vectors, resultSets, knn, params);
  }

  // Batch vector query, parallelized over queries with OpenMP; per-query
  // costs are reduced into queryLoss afterwards.
  void knnSearch(
    const std::vector<std::vector<ElementType>> &vectors,
    std::vector<ResultSet<IDType, DistanceType>> &resultSets,
    size_t knn,
    const SearchParams& params) {

    resultSets.resize(vectors.size());

    float costSum = 0;

#pragma omp parallel num_threads(params.cores)
    {
#pragma omp for schedule(static) reduction(+:costSum)
      for (int i = 0; i < (int)vectors.size(); i++) {
        resultSets[i] = ResultSet<IDType, DistanceType>(knn);
        costSum += findNeighbors(vectors[i], resultSets[i], params);
      }
    }

    queryLoss += costSum;
    checkBeginUpdate();
  }

  // alias for knnSearch(points) since Cython does not seem to support method overloading
  void knnSearchVec(
    const std::vector<std::vector<ElementType>> &vectors,
    std::vector<ResultSet<IDType, DistanceType>> &resultSets,
    size_t knn,
    const SearchParams& params) {

    knnSearch(vectors, resultSets, knn, params);
  }

protected:
  // Bulk-build every tree from a fresh shuffle of all current ids.
  void buildIndex() {
    std::vector<IDType> ids(size);
    for (size_t i = 0; i < size; ++i) {
      ids[i] = IDType(i);
    }

    for (size_t i = 0; i < numTrees; ++i) {
      std::random_shuffle(ids.begin(), ids.end());
      trees[i]->root = divideTree(trees[i], &ids[0], size, 1);
      trees[i]->size = size;
      trees[i]->cost = trees[i]->computeCost();
    }
  }

  // Insert one point: descend to a leaf, then split that leaf along the
  // dimension with the widest span between the two points, keeping the
  // tree's insertion log / imbalance bookkeeping current.
  void addPointToTree(KDTree<NodePtr>* tree, NodePtr node, IDType id, int depth) {
    if ((node->child1 == NULL) && (node->child2 == NULL)) {
      // if leaf
      size_t nodeId = node->id;
      size_t divfeat = dataSource->findDimWithMaxSpan(id, nodeId);

      NodePtr left = new(pool) Node(tree);
      left->child1 = left->child2 = NULL;
      NodePtr right = new(pool) Node(tree);
      right->child1 = right->child2 = NULL;

      ElementType pointValue = dataSource->get(id, divfeat);
      ElementType leafValue = dataSource->get(node->id, divfeat);

      if (pointValue < leafValue) {
        left->id = id;
        right->id = node->id;
      }
      else {
        left->id = node->id;
        right->id = id;
      }

      left->divfeat = right->divfeat = -1;
      node->divfeat = divfeat;
      node->divval = (pointValue + leafValue) / 2;  // midpoint split between the two points
      node->child1 = left;
      node->child2 = right;

      // incrementally update imbalance
      tree->setInsertionLog(id, 0, depth + 2);
      tree->incrementFreqByOne(id);
      tree->incrementFreqAndDepthByOne(nodeId);
    }
    else {
      if (dataSource->get(id, node->divfeat) < node->divval) {
        addPointToTree(tree, node->child1, id, depth + 1);
      }
      else {
        addPointToTree(tree, node->child2, id, depth + 1);
      }
    }
  }

  // Destroy all trees (nodes live in `pool`, hence explicit destructor
  // calls) and release the pool memory in one shot.
  void freeIndex() {
    for (size_t i = 0; i < numTrees; ++i) {
      if (trees[i] != nullptr) trees[i]->~KDTree();
    }
    pool.free();
  }

public:
  // Largest cached cost across all trees.
  float getMaxCachedCost() {
    float cost = 0;
    for (size_t i = 0; i < numTrees; ++i) {
      if (cost < trees[i]->getCachedCost()) {
        cost = trees[i]->getCachedCost();
      }
    }
    return cost;
  }

  std::vector<float> getCachedImbalances() {
    std::vector<float> imbalances;
    for (size_t i = 0; i < numTrees; ++i) {
      imbalances.push_back(trees[i]->getCachedImbalance());
    }
    return imbalances;
  }

  std::vector<float> recomputeImbalances() {
    std::vector<float> imbalances;
    for (size_t i = 0; i < numTrees; ++i) {
      imbalances.push_back(trees[i]->computeImbalance());
    }
    return imbalances;
  }

  size_t computeMaxDepth() {
    size_t maxDepth = 0;
    for (size_t j = 0; j < numTrees; ++j) {
      size_t depth = trees[j]->computeMaxDepth();
      if (maxDepth < depth)
        maxDepth = depth;
    }
    return maxDepth;
  }

  // Debug dump of rebuild progress.
  void printBackstage() {
    std::cout << "queue size: " << queue.size() << std::endl;
    std::cout << "ongoingTree size: " << ongoingTree->size << std::endl;
  }

public:
  UpdateStatus updateStatus = UpdateStatus::NoUpdate;
  KDTree<NodePtr>* ongoingTree;   // tree under construction; valid only while a rebuild is active
  float queryLoss = 0.0;          // accumulated excess query cost since the last rebuild
  TreeWeight weight;

private:
  float reconstructionWeight; // lower => more update
  size_t sizeAtUpdate = 0;    // next id to feed into ongoingTree during InsertingPoints
  std::queue<NodeSplit> queue;    // pending splits for the BuildingTree phase
  std::vector<size_t> ids;        // shuffled id snapshot backing the split queue
};

}

#endif
d2q9-bgk.c
/* ** Code to implement a d2q9-bgk lattice boltzmann scheme. ** 'd2' inidates a 2-dimensional grid, and ** 'q9' indicates 9 velocities per grid cell. ** 'bgk' refers to the Bhatnagar-Gross-Krook collision step. ** ** The 'speeds' in each cell are numbered as follows: ** ** 6 2 5 ** \|/ ** 3-0-1 ** /|\ ** 7 4 8 ** ** A 2D grid: ** ** cols ** --- --- --- ** | D | E | F | ** rows --- --- --- ** | A | B | C | ** --- --- --- ** ** 'unwrapped' in row major order to give a 1D array: ** ** --- --- --- --- --- --- ** | A | B | C | D | E | F | ** --- --- --- --- --- --- ** ** Grid indicies are: ** ** ny ** ^ cols(jj) ** | ----- ----- ----- ** | | ... | ... | etc | ** | ----- ----- ----- ** rows(ii) | | 1,0 | 1,1 | 1,2 | ** | ----- ----- ----- ** | | 0,0 | 0,1 | 0,2 | ** | ----- ----- ----- ** ----------------------> nx ** ** Note the names of the input parameter and obstacle files ** are passed on the command line, e.g.: ** ** d2q9-bgk.exe input.params obstacles.dat ** ** Be sure to adjust the grid dimensions in the parameter file ** if you choose a different obstacle file. */ #include<stdio.h> #include<stdlib.h> #include<math.h> #include<time.h> #include<sys/time.h> #include<sys/resource.h> #include<omp.h> //#include<fenv.h> #define NSPEEDS 9 #define FINALSTATEFILE "final_state.dat" #define AVVELSFILE "av_vels.dat" #define BLOCKSIZE 16 //Not used #define NUMTHREADS 16 #define PAR //Comment this out and set NUMTHREADS to 1 for serial //Vector size #define VECSIZE 4 //nyhalf = ny/2 #define getcelladdr(ii,jj,arr1,arr2,nyhalf,nx) ((ii<nyhalf) ? (&(arr1[ii*nx+jj])) : (&(arr2[(ii-nyhalf)*nx+jj]))) #define getcellval(ii,jj,arr1,arr2,nyhalf,nx) ((ii<nyhalf) ? (arr1[ii*nx+jj]) : (arr2[(ii-nyhalf)*nx+jj])) #define getcellspeed(ii,jj,sp,arr1,arr2,nyhalf,nx) ((ii<nyhalf) ? 
(arr1[ii*nx+jj].speeds[sp]) : (arr2[(ii-nyhalf)*nx+jj].speeds[sp])) /* struct to hold the parameter values */ struct __declspec(align(32)) t_param { double density; /* density per link */ double accel; /* density redistribution */ double omega; /* relaxation parameter */ double free_cells_inv; int nx; /* no. of cells in x-direction */ int ny; /* no. of cells in y-direction */ int nyhalf; /* to prevent it from redoing the division */ int maxIters; /* no. of iterations */ int reynolds_dim; /* dimension for Reynolds number */ }; typedef struct t_param t_param; /* struct to hold the 'speed' values */ typedef struct { double speeds[NSPEEDS]; } t_speed; /* ** function prototypes */ /* load params, allocate memory, load obstacles & initialise fluid particle densities */ int initialise(const char* paramfile, const char* obstaclefile, t_param* params, t_speed** cells_ptr0, t_speed** cells_ptr1, t_speed** tmp_cells_ptr0, t_speed** tmp_cells_ptr1, int** obstacles_ptr0, int** obstacles_ptr1, double** av_vels_ptr); void preprocess_obstacles(int* obstacles,const t_param params); /* ** The main calculation methods. 
** timestep calls, in order, the functions: ** accelerate_flow(), propagate(), rebound() & collision() */ int accelerate_flow(const t_param params, t_speed* restrict cells1, int* restrict obstacles1); //int propagate(const t_param params, t_speed** cells_ptr, t_speed** tmp_cells_ptr); //int rebound(const t_param params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int* obstacles); //int collision(const t_param params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int* obstacles); double timestep(const t_param params, t_speed* restrict cells0, t_speed* restrict cells1, t_speed* restrict tmp_cells0, t_speed* restrict tmp_cells1, int* restrict obstacles0, int* restrict obstacles1, int tid); double timestep_row(const t_param params, t_speed* cells0, t_speed* cells1, t_speed* tmp_cells0, t_speed* tmp_cells1, int* obstacles0, int* obstacles1, int ii, int tid); int write_values(const t_param params, t_speed* cells0, t_speed* cells1, int* obstacles0, int* obstacles1, double* av_vels); /* finalise, including freeing up allocated memory */ int finalise(const t_param* params, t_speed** cells_ptr0, t_speed** cells_ptr1, t_speed** tmp_cells_ptr0, t_speed** tmp_cells_ptr1, int** obstacles_ptr0, int** obstacles_ptr1, double** av_vels_ptr); /* Sum all the densities in the grid. ** The total should remain constant from one timestep to the next. 
*/ double total_density(const t_param params, t_speed* cells0, t_speed* cells1); /* compute average velocity */ double av_velocity(const t_param params, t_speed* cells0, t_speed* cells1, int* obstacles0, int* obstacles1); /* calculate Reynolds number */ double calc_reynolds(const t_param params, t_speed* cells0, t_speed* cells1, int* obstacles0, int* obstacles1); /* utility functions */ void die(const char* message, const int line, const char* file); void usage(const char* exe); /* ** main program: ** initialise, timestep loop, finalise */ int main(int argc, char* argv[]) { char* paramfile = NULL; /* name of the input parameter file */ char* obstaclefile = NULL; /* name of a the input obstacle file */ t_param params; /* struct to hold parameter values */ t_speed* cells0 = NULL; /* grid containing fluid densities */ t_speed* cells1 = NULL; t_speed* tmp_cells0 = NULL; t_speed* tmp_cells1 = NULL; /* scratch space */ int* obstacles0 = NULL; /* grid indicating which cells are blocked */ int* obstacles1 = NULL; double* av_vels = NULL; /* a record of the av. 
velocity computed for each timestep */ struct timeval timstr; /* structure to hold elapsed time */ struct rusage ru; /* structure to hold CPU time--system and user */ double tic, toc; /* floating point numbers to calculate elapsed wallclock time */ double usrtim; /* floating point number to record elapsed user CPU time */ double systim; /* floating point number to record elapsed system CPU time */ omp_set_num_threads(NUMTHREADS); //feenableexcept(FE_INVALID | FE_OVERFLOW); /* parse the command line */ if (argc != 3) { usage(argv[0]); } else { paramfile = argv[1]; obstaclefile = argv[2]; } /* initialise our data structures and load values from file */ initialise(paramfile, obstaclefile, &params, &cells0, &cells1, &tmp_cells0, &tmp_cells1, &obstacles0, &obstacles1, &av_vels); /* iterate for maxIters timesteps */ gettimeofday(&timstr, NULL); tic = timstr.tv_sec + (timstr.tv_usec / 1000000.0); #ifdef PAR #pragma omp parallel firstprivate(tmp_cells0,cells0, tmp_cells1, cells1) #endif { int tid = omp_get_thread_num(); for (unsigned int tt = 0; tt < params.maxIters;tt++) { #ifdef PAR #pragma omp barrier #endif if(tid==NUMTHREADS-1){ accelerate_flow(params, cells1, obstacles1); } #ifdef PAR #pragma omp barrier #endif double local = timestep(params, cells0, cells1, tmp_cells0, tmp_cells1, obstacles0, obstacles1,tid); local += timestep_row(params, cells0, cells1, tmp_cells0, tmp_cells1, obstacles0, obstacles1,0,tid); local += timestep_row(params, cells0, cells1, tmp_cells0, tmp_cells1, obstacles0, obstacles1,params.nyhalf-1,tid); local += timestep_row(params, cells0, cells1, tmp_cells0, tmp_cells1, obstacles0, obstacles1,params.nyhalf,tid); local += timestep_row(params, cells0, cells1, tmp_cells0, tmp_cells1, obstacles0, obstacles1,params.ny-1,tid); #ifdef PAR #pragma omp atomic #endif av_vels[tt] += local * params.free_cells_inv; t_speed* tmp = cells0; cells0 = tmp_cells0; tmp_cells0 = tmp; tmp = cells1; cells1 = tmp_cells1; tmp_cells1= tmp; #ifdef DEBUG #ifdef PAR #pragma 
omp single nowait #endif { printf("==timestep: %d==\n", tt); printf("av velocity: %.12E\n", av_vels[tt]); printf("tot density: %.12E\n", total_density(params, cells0, cells1)); } #endif } } gettimeofday(&timstr, NULL); toc = timstr.tv_sec + (timstr.tv_usec / 1000000.0); getrusage(RUSAGE_SELF, &ru); timstr = ru.ru_utime; usrtim = timstr.tv_sec + (timstr.tv_usec / 1000000.0); timstr = ru.ru_stime; systim = timstr.tv_sec + (timstr.tv_usec / 1000000.0); /* write final values and free memory */ printf("==done==\n"); printf("Reynolds number:\t\t%.12E\n", calc_reynolds(params, cells0, cells1, obstacles0, obstacles1)); printf("Elapsed time:\t\t\t%.6lf (s)\n", toc - tic); printf("Elapsed user CPU time:\t\t%.6lf (s)\n", usrtim); printf("Elapsed system CPU time:\t%.6lf (s)\n", systim); write_values(params, cells0, cells1, obstacles0, obstacles1, av_vels); finalise(&params, &cells0, &cells1, &tmp_cells0, &tmp_cells1, &obstacles0, &obstacles1, &av_vels); return EXIT_SUCCESS; } inline int accelerate_flow(const t_param params, t_speed* restrict cells1, int* restrict obstacles1) { /* compute weighting factors */ double w1 = params.density * params.accel * 0.111111111111111111111111f; double w2 = params.density * params.accel * 0.0277777777777777777777778f; /* modify the 2nd row of the grid */ int ii = params.nyhalf - 2; //int tid = omp_get_thread_num(); //int start = tid * (params.nx/NUMTHREADS); //int end = (tid+1) * (params.nx/NUMTHREADS); for (unsigned int jj = 0; jj < params.nx; jj+=VECSIZE) { #pragma vector aligned for(int k=0;k<VECSIZE;k++){ if (!obstacles1[ii * params.nx + jj+k] && cells1[ii*params.nx+jj+k].speeds[3]-w1>0.0 && cells1[ii*params.nx+jj+k].speeds[6]-w2>0.0 && cells1[ii*params.nx+jj+k].speeds[7]-w2>0.0){ /* increase 'east-side' densities */ cells1[ii * params.nx + jj+k].speeds[1] += w1; cells1[ii * params.nx + jj+k].speeds[5] += w2; cells1[ii * params.nx + jj+k].speeds[8] += w2; /* decrease 'west-side' densities */ cells1[ii * params.nx + jj+k].speeds[3] -= w1; 
/* tail of the preceding function (presumably accelerate_flow -- its head is
 * outside this view): removes the acceleration weights from the vectorised
 * cells and reports success. */
cells1[ii * params.nx + jj+k].speeds[6] -= w2;
cells1[ii * params.nx + jj+k].speeds[7] -= w2;
}
}
}
return EXIT_SUCCESS;
}

//double sqrt13(double n)
//{
//  double result;
//
//  __asm__(
//    "fsqrt\n\t"
//    : "=t"(result) : "0"(n)
//  );
//
//  return result;
//}

/*
** timestep_row: fused propagate + BGK collide for ONE grid row `ii`,
** restricted to this thread's column slice
** [tid*(nx/NUMTHREADS), (tid+1)*(nx/NUMTHREADS)).
** Pulls the nine distributions from the neighbouring cells of the
** cells0/cells1 half-grids and writes the fully relaxed values into
** tmp_cells0/tmp_cells1, so the row needs no second pass.
** Returns the partial sum of |u| over updated fluid cells (for the
** average-velocity statistic).
** NOTE(review): getcelladdr/getcellval/getcellspeed appear to dispatch
** between the two half-grids via params.nyhalf -- defined elsewhere; confirm.
*/
inline double timestep_row(const t_param params, t_speed* cells0, t_speed* cells1, t_speed* tmp_cells0, t_speed* tmp_cells1, int* obstacles0, int* obstacles1, int ii, int tid)
{
  static const double c_sq = 1.0 / 3.0; /* square of speed of sound */
  static const double twooverthree = 2.0/3.0;
  static const double two_c_sq_sq = 2.0 / 9.0;
  static const double w0 = 4.0 / 81.0 * 4.5; /* weighting factor */
  static const double w1 = 1.0 / 9.0 * 4.5 ; /* weighting factor */
  static const double w2 = 1.0 / 36.0 * 4.5; /* weighting factor */
  double oneminusomega = 1.0 - params.omega;
  double tot_u = 0.0;
  /* north/south neighbour rows, with periodic wrap-around */
  int y_n = ii+1;
  if(y_n == params.ny) y_n = 0;
  int y_s = (ii == 0) ? (params.ny - 1) : (ii - 1);
  /* this thread's slice of columns
   * NOTE(review): `start`/`end` are signed but compared against unsigned jj */
  int start = tid * (params.nx/NUMTHREADS);
  int end = (tid+1) * (params.nx/NUMTHREADS);
  for(unsigned int jj = start; jj < end; jj++){
    /* determine indices of axis-direction neighbours
    ** respecting periodic boundary conditions (wrap around) */
    int x_e = jj + 1;
    if (x_e == params.nx) x_e = 0;
    int x_w = (jj == 0) ? (params.nx - 1) : (jj - 1);
    /* propagate densities to neighbouring cells, following
    ** appropriate directions of travel and writing into
    ** scratch space grid */
    t_speed *const tmp_cell = getcelladdr(ii,jj,tmp_cells0,tmp_cells1,params.nyhalf,params.nx);
    //Reverse the operation such that after each iteration the current cell is fully updated
    //and hence the loop can be merged with the next step
    if(0 == getcellval(ii,jj,obstacles0,obstacles1,params.nyhalf,params.nx)){
      /* fluid cell: gather (pull) the nine distributions from the neighbours,
       * accumulating the local density in the same pass */
      double local_density = tmp_cell->speeds[0] = getcellspeed(ii,jj,0,cells0,cells1,params.nyhalf,params.nx);
      local_density += tmp_cell->speeds[1] = getcellspeed(ii,x_w,1,cells0,cells1,params.nyhalf,params.nx);
      local_density += tmp_cell->speeds[2] = getcellspeed(y_s,jj,2,cells0,cells1,params.nyhalf,params.nx);
      local_density += tmp_cell->speeds[3] = getcellspeed(ii,x_e,3,cells0,cells1,params.nyhalf,params.nx);
      local_density += tmp_cell->speeds[4] = getcellspeed(y_n,jj,4,cells0,cells1,params.nyhalf,params.nx);
      local_density += tmp_cell->speeds[5] = getcellspeed(y_s,x_w,5,cells0,cells1,params.nyhalf,params.nx);
      local_density += tmp_cell->speeds[6] = getcellspeed(y_s,x_e,6,cells0,cells1,params.nyhalf,params.nx);
      local_density += tmp_cell->speeds[7] = getcellspeed(y_n,x_e,7,cells0,cells1,params.nyhalf,params.nx);
      local_density += tmp_cell->speeds[8] = getcellspeed(y_n,x_w,8,cells0,cells1,params.nyhalf,params.nx);
      /* compute x velocity component. NO DIVISION BY LOCAL DENSITY*/
      double u_x = tmp_cell->speeds[1] + tmp_cell->speeds[5] + tmp_cell->speeds[8] - tmp_cell->speeds[3] - tmp_cell->speeds[6] - tmp_cell->speeds[7];
      /* compute y velocity component. NO DIVISION BY LOCAL DENSITY */
      double u_y = tmp_cell->speeds[2] + tmp_cell->speeds[5] + tmp_cell->speeds[6] - tmp_cell->speeds[4] - tmp_cell->speeds[7] - tmp_cell->speeds[8];
      //EQUATIONS ARE VERY DIFFERENT BUT STILL DO THE SAME THING.
      /* the equilibria below are the standard d2q9 ones rescaled by
       * local_density (since u_x/u_y were not divided by it) and pre-multiplied
       * by omega/local_density via ldinvomega */
      const double u_x_sq = u_x * u_x;
      const double u_y_sq = u_y * u_y;
      const double u_xy = u_x + u_y;
      const double u_xy2 = u_x - u_y;
      const double ld_sq = local_density * local_density;
      const double c_sq_ld_2 = twooverthree * local_density;
      /* velocity squared */
      const double u_sq = u_x_sq + u_y_sq;
      const double ldinv = 1.0/local_density;
      const double ldinvomega = ldinv*params.omega;
      /* equilibrium densities */
      double d_equ[NSPEEDS];
      /* zero velocity density: weight w0 */
      d_equ[0] = w0 * (2*ld_sq-3*u_sq) * ldinvomega;
      /* axis speeds: weight w1 */
      d_equ[1] = w1 * ( two_c_sq_sq*ld_sq + c_sq_ld_2*u_x + u_x_sq - u_sq*c_sq ) * ldinvomega;
      d_equ[2] = w1 * ( two_c_sq_sq*ld_sq + c_sq_ld_2*u_y + u_y_sq - u_sq*c_sq ) * ldinvomega;
      d_equ[3] = w1 * ( two_c_sq_sq*ld_sq - c_sq_ld_2*u_x + u_x_sq - u_sq*c_sq ) * ldinvomega;
      d_equ[4] = w1 * ( two_c_sq_sq*ld_sq - c_sq_ld_2*u_y + u_y_sq - u_sq*c_sq ) * ldinvomega;
      /* diagonal speeds: weight w2 */
      d_equ[5] = w2 * ( two_c_sq_sq*ld_sq + c_sq_ld_2*u_xy + u_xy*u_xy - u_sq*c_sq ) * ldinvomega;
      d_equ[6] = w2 * ( two_c_sq_sq*ld_sq - c_sq_ld_2*u_xy2 + u_xy2*u_xy2 - u_sq*c_sq ) * ldinvomega;
      d_equ[7] = w2 * ( two_c_sq_sq*ld_sq - c_sq_ld_2*u_xy + u_xy*u_xy - u_sq*c_sq ) * ldinvomega;
      d_equ[8] = w2 * ( two_c_sq_sq*ld_sq + c_sq_ld_2*u_xy2 + u_xy2*u_xy2 - u_sq*c_sq ) * ldinvomega;
      /* relaxation step */
      for (unsigned int kk = 0; kk < NSPEEDS; kk++)
      {
        tmp_cell->speeds[kk] = tmp_cell->speeds[kk]*oneminusomega;
        tmp_cell->speeds[kk] += d_equ[kk];
      }
      tot_u += sqrt(u_x*u_x + u_y*u_y) * ldinv;
    }
    else{
      /* obstacle cell: rebound -- each pulled distribution is written into
       * the opposite-direction slot (1<->3, 2<->4, 5<->7, 6<->8) */
      tmp_cell->speeds[0] = getcellspeed(ii,jj,0,cells0,cells1,params.nyhalf,params.nx);
      tmp_cell->speeds[3] = getcellspeed(ii,x_w,1,cells0,cells1,params.nyhalf,params.nx);
      tmp_cell->speeds[4] = getcellspeed(y_s,jj,2,cells0,cells1,params.nyhalf,params.nx);
      tmp_cell->speeds[1] = getcellspeed(ii,x_e,3,cells0,cells1,params.nyhalf,params.nx);
      tmp_cell->speeds[2] = getcellspeed(y_n,jj,4,cells0,cells1,params.nyhalf,params.nx);
      tmp_cell->speeds[7] = getcellspeed(y_s,x_w,5,cells0,cells1,params.nyhalf,params.nx);
      tmp_cell->speeds[8] = getcellspeed(y_s,x_e,6,cells0,cells1,params.nyhalf,params.nx);
      tmp_cell->speeds[5] = getcellspeed(y_n,x_e,7,cells0,cells1,params.nyhalf,params.nx);
      tmp_cell->speeds[6] = getcellspeed(y_n,x_w,8,cells0,cells1,params.nyhalf,params.nx);
    }
  }
  return tot_u;
}

/*
** timestep: vectorised (VECSIZE-wide) fused propagate + BGK collide over this
** thread's band of rows.  Rows 0, nyhalf-1, nyhalf and ny-1 are skipped here
** -- their neighbours cross the half-grid split and are handled elsewhere
** (timestep_row).  Returns the partial sum of |u| over fluid cells.
*/
inline double timestep(const t_param params, t_speed* restrict cells0, t_speed* restrict cells1, t_speed* restrict tmp_cells0, t_speed* restrict tmp_cells1, int* restrict obstacles0, int* restrict obstacles1, int tid)
{
  //static const double c_sq = 1.0 / 3.0; /* square of speed of sound */
  static const double ic_sq = 3.0;
  //static const double ic_sq_sq = 9.0;
  static const double w0 = 4.0 / 9.0; /* weighting factor */
  static const double w1 = 1.0 / 9.0; /* weighting factor */
  static const double w2 = 1.0 / 36.0; /* weighting factor */
  double tot_u = 0.0;
  /* loop over the cells in the grid
  ** NB the collision step is called after
  ** the propagate step and so values of interest
  ** are in the scratch-space grid */
  int start = tid * (params.ny/NUMTHREADS);
  int end = (tid+1) * (params.ny/NUMTHREADS);
  for (unsigned int ii = start; ii < end; ii++)
  {
    if (ii==0 || ii==(params.nyhalf-1) || ii==params.nyhalf || ii==(params.ny-1) ) continue; //special cases. handle them elsewhere
    t_speed* restrict cells = NULL;
    t_speed* restrict tmp_cells = NULL;
    int* restrict obstacles = NULL;
    int qq = 0;
    /* select the half-grid this row lives in; qq is the row index within it */
    if(ii<params.nyhalf){ cells = cells0; tmp_cells = tmp_cells0; obstacles = obstacles0; qq = ii; }
    else{ cells = cells1; tmp_cells = tmp_cells1; obstacles = obstacles1; qq = ii - params.nyhalf; }
    /* safe to index y_n/y_s directly: boundary rows were skipped above */
    int y_n = qq + 1;
    int y_s = qq - 1;
    for(unsigned int jj = 0; jj < params.nx; jj+=VECSIZE){
      /* determine indices of axis-direction neighbours
      ** respecting periodic boundary conditions (wrap around) */
      /* gather the nine distributions for VECSIZE adjacent cells into a
       * structure-of-arrays scratch buffer */
      double tmp[VECSIZE*NSPEEDS] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++){
        int x = jj+k;
        int x_e = x + 1;
        if(x_e >= params.nx) x_e -= params.nx;
        int x_w = (x == 0) ? (params.nx - 1) : (x-1);
        tmp[VECSIZE*0+k] = cells[qq * params.nx + x].speeds[0];
        tmp[VECSIZE*1+k] = cells[qq * params.nx + x_w].speeds[1];
        tmp[VECSIZE*2+k] = cells[y_s * params.nx + x].speeds[2];
        tmp[VECSIZE*3+k] = cells[qq * params.nx + x_e].speeds[3];
        tmp[VECSIZE*4+k] = cells[y_n * params.nx + x].speeds[4];
        tmp[VECSIZE*5+k] = cells[y_s * params.nx + x_w].speeds[5];
        tmp[VECSIZE*6+k] = cells[y_s * params.nx + x_e].speeds[6];
        tmp[VECSIZE*7+k] = cells[y_n * params.nx + x_e].speeds[7];
        tmp[VECSIZE*8+k] = cells[y_n * params.nx + x_w].speeds[8];
      }
      /* per-lane local density */
      double densvec[VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++){
        densvec[k] = tmp[VECSIZE*0+k];
        densvec[k] += tmp[VECSIZE*1+k];
        densvec[k] += tmp[VECSIZE*2+k];
        densvec[k] += tmp[VECSIZE*3+k];
        densvec[k] += tmp[VECSIZE*4+k];
        densvec[k] += tmp[VECSIZE*5+k];
        densvec[k] += tmp[VECSIZE*6+k];
        densvec[k] += tmp[VECSIZE*7+k];
        densvec[k] += tmp[VECSIZE*8+k];
      }
      double densinv[VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++) { densinv[k] = 1.0/densvec[k]; }
      /* momentum sums (NOT yet divided by density -- see commented lines) */
      double u_x[VECSIZE] __attribute__((aligned(32)));
      double u_y[VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++) {
        u_x[k] = tmp[VECSIZE*1+k] +
        tmp[VECSIZE*5+k];
        u_x[k] += tmp[VECSIZE*8+k];
        u_x[k] -= tmp[VECSIZE*3+k];
        u_x[k] -= tmp[VECSIZE*6+k];
        u_x[k] -= tmp[VECSIZE*7+k];
        //u_x[k] *= densinv[k];
        u_y[k] = tmp[VECSIZE*2+k] + tmp[VECSIZE*5+k];
        u_y[k] += tmp[VECSIZE*6+k];
        u_y[k] -= tmp[VECSIZE*4+k];
        u_y[k] -= tmp[VECSIZE*7+k];
        u_y[k] -= tmp[VECSIZE*8+k];
        //u_y[k] *= densinv[k];
      }
      double u_sq[VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++) { u_sq[k] = u_x[k]*u_x[k] + u_y[k]*u_y[k]; }
      /* per-direction projections e_i . u (e.g. uvec[5] = u_x + u_y matches
       * the (1,1) lattice direction) */
      double uvec[NSPEEDS*VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++) {
        uvec[VECSIZE*1+k] = u_x[k];
        uvec[VECSIZE*2+k] = u_y[k];
        uvec[VECSIZE*3+k] = - u_x[k];
        uvec[VECSIZE*4+k] = - u_y[k];
        uvec[VECSIZE*5+k] = u_x[k] + u_y[k];
        uvec[VECSIZE*6+k] = - u_x[k] + u_y[k];
        uvec[VECSIZE*7+k] = - u_x[k] - u_y[k];
        uvec[VECSIZE*8+k] = u_x[k] - u_y[k];
      }
      /* (e_i . u) / c_sq, with ic_sq = 1/c_sq = 3 */
      double ic_sqtimesu[NSPEEDS*VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++) {
        ic_sqtimesu[VECSIZE*1+k] = uvec[VECSIZE*1+k]*ic_sq;
        ic_sqtimesu[VECSIZE*2+k] = uvec[VECSIZE*2+k]*ic_sq;
        ic_sqtimesu[VECSIZE*3+k] = uvec[VECSIZE*3+k]*ic_sq;
        ic_sqtimesu[VECSIZE*4+k] = uvec[VECSIZE*4+k]*ic_sq;
        ic_sqtimesu[VECSIZE*5+k] = uvec[VECSIZE*5+k]*ic_sq;
        ic_sqtimesu[VECSIZE*6+k] = uvec[VECSIZE*6+k]*ic_sq;
        ic_sqtimesu[VECSIZE*7+k] = uvec[VECSIZE*7+k]*ic_sq;
        ic_sqtimesu[VECSIZE*8+k] = uvec[VECSIZE*8+k]*ic_sq;
      }
      /* (e_i . u)^2 / c_sq */
      double ic_sqtimesu_sq[NSPEEDS*VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++) {
        ic_sqtimesu_sq[VECSIZE*1+k] = ic_sqtimesu[VECSIZE*1+k] * uvec[VECSIZE*1+k];
        ic_sqtimesu_sq[VECSIZE*2+k] = ic_sqtimesu[VECSIZE*2+k] * uvec[VECSIZE*2+k];
        ic_sqtimesu_sq[VECSIZE*3+k] = ic_sqtimesu[VECSIZE*3+k] * uvec[VECSIZE*3+k];
        ic_sqtimesu_sq[VECSIZE*4+k] = ic_sqtimesu[VECSIZE*4+k] * uvec[VECSIZE*4+k];
        ic_sqtimesu_sq[VECSIZE*5+k] = ic_sqtimesu[VECSIZE*5+k] * uvec[VECSIZE*5+k];
        ic_sqtimesu_sq[VECSIZE*6+k] = ic_sqtimesu[VECSIZE*6+k] * uvec[VECSIZE*6+k];
        ic_sqtimesu_sq[VECSIZE*7+k] = ic_sqtimesu[VECSIZE*7+k] * uvec[VECSIZE*7+k];
        ic_sqtimesu_sq[VECSIZE*8+k] = ic_sqtimesu[VECSIZE*8+k] * uvec[VECSIZE*8+k];
      }
      /* equilibrium distributions (density-scaled form, cf. timestep_row) */
      double d_equ[NSPEEDS*VECSIZE] __attribute__((aligned(32)));
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++) {
        d_equ[VECSIZE*0+k] = w0 * (densvec[k] - 0.5*densinv[k]*ic_sq*u_sq[k]);
        d_equ[VECSIZE*1+k] = w1 * (densvec[k] + ic_sqtimesu[VECSIZE*1+k] + 0.5 * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*1+k]-u_sq[k]) );
        d_equ[VECSIZE*2+k] = w1 * (densvec[k] + ic_sqtimesu[VECSIZE*2+k] + 0.5 * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*2+k]-u_sq[k]) );
        d_equ[VECSIZE*3+k] = w1 * (densvec[k] + ic_sqtimesu[VECSIZE*3+k] + 0.5 * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*3+k]-u_sq[k]) );
        d_equ[VECSIZE*4+k] = w1 * (densvec[k] + ic_sqtimesu[VECSIZE*4+k] + 0.5 * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*4+k]-u_sq[k]) );
        d_equ[VECSIZE*5+k] = w2 * (densvec[k] + ic_sqtimesu[VECSIZE*5+k] + 0.5 * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*5+k]-u_sq[k]) );
        d_equ[VECSIZE*6+k] = w2 * (densvec[k] + ic_sqtimesu[VECSIZE*6+k] + 0.5 * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*6+k]-u_sq[k]) );
        d_equ[VECSIZE*7+k] = w2 * (densvec[k] + ic_sqtimesu[VECSIZE*7+k] + 0.5 * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*7+k]-u_sq[k]) );
        d_equ[VECSIZE*8+k] = w2 * (densvec[k] + ic_sqtimesu[VECSIZE*8+k] + 0.5 * densinv[k]*ic_sq * (ic_sqtimesu_sq[VECSIZE*8+k]-u_sq[k]) );
      }
      /* count obstacles in this vector so the all-fluid fast path can skip
       * the per-lane branch */
      int obst=0;
#pragma vector aligned
      for(int k=0;k<VECSIZE;k++){ obst+=obstacles[qq*params.nx+jj+k]; }
      if(!obst){
        /* fast path: the whole vector is fluid -- branch-free relaxation */
#pragma vector aligned
        for(int k=0;k<VECSIZE;k++){
          tmp_cells[qq * params.nx + jj + k].speeds[0] = tmp[VECSIZE*0+k] + params.omega*(d_equ[VECSIZE*0+k] - tmp[VECSIZE*0+k]);
          tmp_cells[qq * params.nx + jj + k].speeds[1] = tmp[VECSIZE*1+k] + params.omega*(d_equ[VECSIZE*1+k] - tmp[VECSIZE*1+k]);
          tmp_cells[qq * params.nx + jj + k].speeds[2] = tmp[VECSIZE*2+k] + params.omega*(d_equ[VECSIZE*2+k] - tmp[VECSIZE*2+k]);
          tmp_cells[qq * params.nx + jj + k].speeds[3] = tmp[VECSIZE*3+k] + params.omega*(d_equ[VECSIZE*3+k] - tmp[VECSIZE*3+k]);
          tmp_cells[qq * params.nx + jj + k].speeds[4] = tmp[VECSIZE*4+k] + params.omega*(d_equ[VECSIZE*4+k] - tmp[VECSIZE*4+k]);
          tmp_cells[qq * params.nx + jj + k].speeds[5] = tmp[VECSIZE*5+k] + params.omega*(d_equ[VECSIZE*5+k] - tmp[VECSIZE*5+k]);
          tmp_cells[qq * params.nx + jj + k].speeds[6] = tmp[VECSIZE*6+k] + params.omega*(d_equ[VECSIZE*6+k] - tmp[VECSIZE*6+k]);
          tmp_cells[qq * params.nx + jj + k].speeds[7] = tmp[VECSIZE*7+k] + params.omega*(d_equ[VECSIZE*7+k] - tmp[VECSIZE*7+k]);
          tmp_cells[qq * params.nx + jj + k].speeds[8] = tmp[VECSIZE*8+k] + params.omega*(d_equ[VECSIZE*8+k] - tmp[VECSIZE*8+k]);
          tot_u += sqrt(u_sq[k]) * densinv[k];
        }
      }
      else{
        /* mixed vector: per-lane obstacle test */
#pragma vector aligned
        for(int k=0;k<VECSIZE;k++){
          if(!obstacles[qq * params.nx +jj +k]){
            tmp_cells[qq * params.nx + jj + k].speeds[0] = tmp[VECSIZE*0+k] + params.omega*(d_equ[VECSIZE*0+k] - tmp[VECSIZE*0+k]);
            tmp_cells[qq * params.nx + jj + k].speeds[1] = tmp[VECSIZE*1+k] + params.omega*(d_equ[VECSIZE*1+k] - tmp[VECSIZE*1+k]);
            tmp_cells[qq * params.nx + jj + k].speeds[2] = tmp[VECSIZE*2+k] + params.omega*(d_equ[VECSIZE*2+k] - tmp[VECSIZE*2+k]);
            tmp_cells[qq * params.nx + jj + k].speeds[3] = tmp[VECSIZE*3+k] + params.omega*(d_equ[VECSIZE*3+k] - tmp[VECSIZE*3+k]);
            tmp_cells[qq * params.nx + jj + k].speeds[4] = tmp[VECSIZE*4+k] + params.omega*(d_equ[VECSIZE*4+k] - tmp[VECSIZE*4+k]);
            tmp_cells[qq * params.nx + jj + k].speeds[5] = tmp[VECSIZE*5+k] + params.omega*(d_equ[VECSIZE*5+k] - tmp[VECSIZE*5+k]);
            tmp_cells[qq * params.nx + jj + k].speeds[6] = tmp[VECSIZE*6+k] + params.omega*(d_equ[VECSIZE*6+k] - tmp[VECSIZE*6+k]);
            tmp_cells[qq * params.nx + jj + k].speeds[7] = tmp[VECSIZE*7+k] + params.omega*(d_equ[VECSIZE*7+k] - tmp[VECSIZE*7+k]);
            tmp_cells[qq * params.nx + jj + k].speeds[8] = tmp[VECSIZE*8+k] + params.omega*(d_equ[VECSIZE*8+k] - tmp[VECSIZE*8+k]);
            tot_u += sqrt(u_sq[k]) * densinv[k];
          }
          else{
            /* obstacle lane: rebound (swap opposite directions) */
            tmp_cells[qq * params.nx + jj + k].speeds[0] = tmp[VECSIZE*0+k];
            tmp_cells[qq *
params.nx + jj + k].speeds[3] = tmp[VECSIZE*1+k];
            tmp_cells[qq * params.nx + jj + k].speeds[4] = tmp[VECSIZE*2+k];
            tmp_cells[qq * params.nx + jj + k].speeds[1] = tmp[VECSIZE*3+k];
            tmp_cells[qq * params.nx + jj + k].speeds[2] = tmp[VECSIZE*4+k];
            tmp_cells[qq * params.nx + jj + k].speeds[7] = tmp[VECSIZE*5+k];
            tmp_cells[qq * params.nx + jj + k].speeds[8] = tmp[VECSIZE*6+k];
            tmp_cells[qq * params.nx + jj + k].speeds[5] = tmp[VECSIZE*7+k];
            tmp_cells[qq * params.nx + jj + k].speeds[6] = tmp[VECSIZE*8+k];
          }
        }
      }
    }
  }
  return tot_u;
}

/*
** av_velocity: serial reduction of the mean fluid speed over the whole grid.
** For every non-obstacle cell it sums the nine distributions into the local
** density, forms u = momentum/density, and accumulates |u|; the total is
** scaled by params.free_cells_inv (1 / number of free cells).
*/
double av_velocity(const t_param params, t_speed* cells0, t_speed* cells1, int* obstacles0, int* obstacles1)
{
  double tot_u; /* accumulated magnitudes of velocity for each cell */
  /* initialise */
  tot_u = 0.0;
  /* loop over all non-blocked cells */
  for (unsigned int ii = 0; ii < params.ny; ii++)
  {
    for (unsigned int jj = 0; jj < params.nx; jj++)
    {
      /* ignore occupied cells */
      if (0 == getcellval(ii,jj,obstacles0,obstacles1,params.nyhalf,params.nx))
      {
        /* local density total */
        double local_density = 0.0;
        for (unsigned int kk = 0; kk < NSPEEDS; kk++)
        {
          local_density += getcellspeed(ii,jj,kk,cells0,cells1,params.nyhalf,params.nx);
        }
        /* x-component of velocity */
        t_speed* cell = getcelladdr(ii,jj,cells0,cells1,params.nyhalf,params.nx);
        double u_x = (cell->speeds[1] + cell->speeds[5] + cell->speeds[8] - (cell->speeds[3] + cell->speeds[6] + cell->speeds[7])) / local_density;
        /* compute y velocity component */
        double u_y = (cell->speeds[2] + cell->speeds[5] + cell->speeds[6] - (cell->speeds[4] + cell->speeds[7] + cell->speeds[8])) / local_density;
        /* accumulate the norm of x- and y- velocity components */
        tot_u += sqrt((u_x * u_x) + (u_y * u_y));
      }
    }
  }
  return tot_u * params.free_cells_inv;
}

/*
** initialise: read the parameter file, allocate the two half-grids (each
** thread group allocates its own half for NUMA locality), set the initial
** densities, and read the obstacle map.  Returns EXIT_SUCCESS; calls die()
** on any error.
*/
int initialise(const char* paramfile, const char* obstaclefile, t_param* params, t_speed** cells_ptr0, t_speed** cells_ptr1, t_speed** tmp_cells_ptr0, t_speed** tmp_cells_ptr1, int** obstacles_ptr0, int** obstacles_ptr1, double** av_vels_ptr)
{
  char message[1024]; /*
message buffer */
  FILE* fp; /* file pointer */
  int xx, yy; /* generic array indices */
  int blocked; /* indicates whether a cell is blocked by an obstacle */
  int retval; /* to hold return value for checking */
  /* open the parameter file */
  fp = fopen(paramfile, "r");
  if (fp == NULL)
  {
    sprintf(message, "could not open input parameter file: %s", paramfile);
    die(message, __LINE__, __FILE__);
  }
  /* read in the parameter values */
  retval = fscanf(fp, "%d\n", &(params->nx));
  if (retval != 1) die("could not read param file: nx", __LINE__, __FILE__);
  retval = fscanf(fp, "%d\n", &(params->ny));
  if (retval != 1) die("could not read param file: ny", __LINE__, __FILE__);
  retval = fscanf(fp, "%d\n", &(params->maxIters));
  if (retval != 1) die("could not read param file: maxIters", __LINE__, __FILE__);
  retval = fscanf(fp, "%d\n", &(params->reynolds_dim));
  if (retval != 1) die("could not read param file: reynolds_dim", __LINE__, __FILE__);
  retval = fscanf(fp, "%lf\n", &(params->density));
  if (retval != 1) die("could not read param file: density", __LINE__, __FILE__);
  retval = fscanf(fp, "%lf\n", &(params->accel));
  if (retval != 1) die("could not read param file: accel", __LINE__, __FILE__);
  retval = fscanf(fp, "%lf\n", &(params->omega));
  if (retval != 1) die("could not read param file: omega", __LINE__, __FILE__);
  /* and close up the file */
  fclose(fp);
  int numOfFreeCells = params->nx*params->ny;
  params->nyhalf = params->ny/2; /* the grid is split into two half-grids */
  /*
  ** Allocate memory.
  **
  ** Remember C is pass-by-value, so we need to
  ** pass pointers into the initialise function.
  **
  ** NB we are allocating a 1D array, so that the
  ** memory will be contiguous. We still want to
  ** index this memory as if it were a (row major
  ** ordered) 2D array, however. We will perform
  ** some arithmetic using the row and column
  ** coordinates, inside the square brackets, when
  ** we want to access elements of this array.
  **
  ** Note also that we are using a structure to
  ** hold an array of 'speeds'. We will allocate
  ** a 1D array of these structs.
  */
  /* main grid */
  /* Fortunately, blue crystal's compute */
#ifdef PAR
#pragma omp parallel
#endif
  {
#ifdef PAR
    int tid = omp_get_thread_num();
#else
    int tid = 0;
#endif
    /* thread 0 allocates the first half-grid; the thread in the middle of
     * the team allocates the second -- presumably a first-touch/NUMA
     * placement strategy (TODO confirm) */
    if(tid == 0){
      *cells_ptr0 = (t_speed*)malloc(sizeof(t_speed) * (params->nyhalf * params->nx));
      if (*cells_ptr0 == NULL) die("cannot allocate memory for cells", __LINE__, __FILE__);
      /* 'helper' grid, used as scratch space */
      *tmp_cells_ptr0 = (t_speed*)malloc(sizeof(t_speed) * (params->nyhalf * params->nx));
      if (*tmp_cells_ptr0 == NULL) die("cannot allocate memory for tmp_cells", __LINE__, __FILE__);
      /* the map of obstacles */
      *obstacles_ptr0 = (int*)malloc(sizeof(int) * (params->nyhalf * params->nx));
      if (*obstacles_ptr0 == NULL) die("cannot allocate column memory for obstacles", __LINE__, __FILE__);
    }
    if(tid == NUMTHREADS/2){
      *cells_ptr1 = (t_speed*)malloc(sizeof(t_speed) * (params->nyhalf * params->nx));
      if (*cells_ptr1 == NULL) die("cannot allocate memory for cells", __LINE__, __FILE__);
      /* 'helper' grid, used as scratch space */
      *tmp_cells_ptr1 = (t_speed*)malloc(sizeof(t_speed) * (params->nyhalf * params->nx));
      if (*tmp_cells_ptr1 == NULL) die("cannot allocate memory for tmp_cells", __LINE__, __FILE__);
      /* the map of obstacles */
      *obstacles_ptr1 = (int*)malloc(sizeof(int) * (params->nyhalf * params->nx));
      if (*obstacles_ptr1 == NULL) die("cannot allocate column memory for obstacles", __LINE__, __FILE__);
    }
  }
  /* initialise densities */
  double w0 = params->density * 4.0 / 9.0;
  double w1 = params->density / 9.0;
  double w2 = params->density / 36.0;
  for (unsigned int ii = 0; ii < params->ny; ii++)
  {
    for (unsigned int jj = 0; jj < params->nx; jj++)
    {
      t_speed* cell = getcelladdr(ii,jj,(*cells_ptr0),(*cells_ptr1),params->nyhalf,params->nx);
      /* centre */
      cell->speeds[0] = w0;
      /* axis directions */
      cell->speeds[1] = w1;
      cell->speeds[2] = w1;
      cell->speeds[3] = w1;
      cell->speeds[4] = w1;
      /* diagonals */
      cell->speeds[5] = w2;
      cell->speeds[6] = w2;
      cell->speeds[7] = w2;
      cell->speeds[8] = w2;
    }
  }
  /* first set all cells in obstacle array to zero */
  for (unsigned int ii = 0; ii < params->ny; ii++)
  {
    for (unsigned int jj = 0; jj < params->nx; jj++)
    {
      int* cell = getcelladdr(ii,jj,(*obstacles_ptr0),(*obstacles_ptr1),params->nyhalf,params->nx);
      *cell = 0;
    }
  }
  /* open the obstacle data file */
  fp = fopen(obstaclefile, "r");
  if (fp == NULL)
  {
    sprintf(message, "could not open input obstacles file: %s", obstaclefile);
    die(message, __LINE__, __FILE__);
  }
  /* read-in the blocked cells list */
  while ((retval = fscanf(fp, "%d %d %d\n", &xx, &yy, &blocked)) != EOF)
  {
    /* some checks */
    if (retval != 3) die("expected 3 values per line in obstacle file", __LINE__, __FILE__);
    if (xx < 0 || xx > params->nx - 1) die("obstacle x-coord out of range", __LINE__, __FILE__);
    if (yy < 0 || yy > params->ny - 1) die("obstacle y-coord out of range", __LINE__, __FILE__);
    if (blocked != 1) die("obstacle blocked value should be 1", __LINE__, __FILE__);
    /* assign to array; only decrement the free-cell count the first time a
     * given cell is listed */
    if(0 == getcellval(yy,xx,(*obstacles_ptr0),(*obstacles_ptr1),params->nyhalf,params->nx)) numOfFreeCells--;
    int* cell = getcelladdr(yy,xx,(*obstacles_ptr0),(*obstacles_ptr1),params->nyhalf,params->nx);
    *cell = blocked;
  }
  params->free_cells_inv = 1.0/numOfFreeCells;
  /* and close the file */
  fclose(fp);
  //preprocess_obstacles(*obstacles_ptr,*params);
  /*
  ** allocate space to hold a record of the avarage velocities computed
  ** at each timestep
  */
  *av_vels_ptr = (double*)malloc(sizeof(double) * params->maxIters);
  return EXIT_SUCCESS;
}

/*
** finalise: release all grid, obstacle and statistics memory allocated by
** initialise(), nulling each caller-held pointer after the free.
*/
int finalise(const t_param* params, t_speed** cells_ptr0, t_speed** cells_ptr1, t_speed** tmp_cells_ptr0, t_speed** tmp_cells_ptr1, int** obstacles_ptr0, int** obstacles_ptr1, double** av_vels_ptr)
{
  /*
  ** free up allocated memory
  */
  free(*cells_ptr0);
  *cells_ptr0 = NULL;
  free(*cells_ptr1);
  *cells_ptr1 = NULL;
  free(*tmp_cells_ptr0);
  *tmp_cells_ptr0 = NULL;
  free(*tmp_cells_ptr1);
  *tmp_cells_ptr1 = NULL;
  free(*obstacles_ptr0);
  *obstacles_ptr0 = NULL;
  free(*obstacles_ptr1);
  *obstacles_ptr1 = NULL;
  free(*av_vels_ptr);
  *av_vels_ptr = NULL;
  return EXIT_SUCCESS;
}

/*
** calc_reynolds: Reynolds number from the final average velocity, using the
** BGK kinematic viscosity nu = (2/omega - 1)/6.
*/
double calc_reynolds(const t_param params, t_speed* cells0, t_speed* cells1, int* obstacles0, int* obstacles1)
{
  const double viscosity = 1.0 / 6.0 * (2.0 / params.omega - 1.0);
  return av_velocity(params, cells0, cells1, obstacles0, obstacles1) * params.reynolds_dim / viscosity;
}

/*
** total_density: sum of all nine distributions over every cell (obstacles
** included) -- useful as a mass-conservation sanity check.
*/
double total_density(const t_param params, t_speed* cells0, t_speed* cells1)
{
  double total = 0.0; /* accumulator */
  for (unsigned int ii = 0; ii < params.ny; ii++)
  {
    for (unsigned int jj = 0; jj < params.nx; jj++)
    {
      for (unsigned int kk = 0; kk < NSPEEDS; kk++)
      {
        total += getcellspeed(ii,jj,kk,cells0,cells1,params.nyhalf,params.nx);
      }
    }
  }
  return total;
}

/*
** write_values: dump the final state (per-cell velocity, speed, pressure and
** obstacle flag) to FINALSTATEFILE and the per-iteration average velocities
** to AVVELSFILE.  Returns EXIT_SUCCESS; calls die() if a file cannot be
** opened.
*/
int write_values(const t_param params, t_speed* cells0, t_speed* cells1, int* obstacles0, int* obstacles1, double* av_vels)
{
  FILE* fp; /* file pointer */
  const double c_sq = 1.0 / 3.0; /* sq. of speed of sound */
  double local_density; /* per grid cell sum of densities */
  double pressure; /* fluid pressure in grid cell */
  double u_x; /* x-component of velocity in grid cell */
  double u_y; /* y-component of velocity in grid cell */
  double u; /* norm--root of summed squares--of u_x and u_y */
  fp = fopen(FINALSTATEFILE, "w");
  if (fp == NULL)
  {
    die("could not open file output file", __LINE__, __FILE__);
  }
  for (unsigned int ii = 0; ii < params.ny; ii++)
  {
    for (unsigned int jj = 0; jj < params.nx; jj++)
    {
      /* an occupied cell */
      if (1 == getcellval(ii,jj,obstacles0,obstacles1,params.nyhalf,params.nx))
      {
        u_x = u_y = u = 0.0;
        pressure = params.density * c_sq;
      }
      /* no obstacle */
      else
      {
        local_density = 0.0;
        t_speed* cell = getcelladdr(ii,jj,cells0,cells1,params.nyhalf,params.nx);
        for (unsigned int kk = 0; kk < NSPEEDS; kk++)
        {
          local_density += cell->speeds[kk];
        }
        /* compute x velocity component */
        u_x = (cell->speeds[1] + cell->speeds[5] + cell->speeds[8] - (cell->speeds[3] + cell->speeds[6] + cell->speeds[7])) / local_density;
        /* compute y velocity component */
        u_y = (cell->speeds[2] + cell->speeds[5] + cell->speeds[6] - (cell->speeds[4] + cell->speeds[7] + cell->speeds[8])) / local_density;
        /* compute norm of velocity */
        u = sqrt((u_x * u_x) + (u_y * u_y));
        /* compute pressure */
        pressure = local_density * c_sq;
      }
      /* write to file */
      fprintf(fp, "%d %d %.12E %.12E %.12E %.12E %d\n", jj, ii, u_x, u_y, u, pressure, getcellval(ii,jj,obstacles0,obstacles1,params.nyhalf,params.nx));
    }
  }
  fclose(fp);
  fp = fopen(AVVELSFILE, "w");
  if (fp == NULL)
  {
    die("could not open file output file", __LINE__, __FILE__);
  }
  for (unsigned int ii = 0; ii < params.maxIters; ii++)
  {
    fprintf(fp, "%d:\t%.12E\n", ii, av_vels[ii]);
  }
  fclose(fp);
  return EXIT_SUCCESS;
}

/* die: print an error (with source location) to stderr and abort the run. */
void die(const char* message, const int line, const char* file)
{
  fprintf(stderr, "Error at line %d of file %s:\n", line, file);
  fprintf(stderr, "%s\n", message);
  fflush(stderr);
  exit(EXIT_FAILURE);
}

/* usage: print command-line usage and exit with failure. */
void usage(const char* exe)
{
  fprintf(stderr, "Usage: %s <paramfile> <obstaclefile>\n", exe);
  exit(EXIT_FAILURE);
}
/* ===== file: alloc2.c ===== */
/*
 * Minimal smoke test for the OpenMP 5.x `allocate` directive applied to a
 * block-scope pointer declaration.  Nothing is allocated or dereferenced;
 * the program only needs to compile and exit successfully.
 */
int main()
{
    int *ptr;
#pragma omp allocate(ptr)
    return 0;
}
/* ===== file: floorplan.c ===== */
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* Original code from the Application Kernel Matrix by Cray */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include "app-desc.h" #include "bots.h" #define ROWS 64 #define COLS 64 #define DMAX 64 #define max(a, b) ((a > b) ? a : b) #define min(a, b) ((a < b) ? 
a : b) int solution = -1; typedef int coor[2]; typedef char ibrd[ROWS][COLS]; typedef char (*pibrd)[COLS]; FILE * inputFile; struct cell { int n; coor *alt; int top; int bot; int lhs; int rhs; int left; int above; int next; }; struct cell * gcells; int MIN_AREA; ibrd BEST_BOARD; coor MIN_FOOTPRINT; int N; /* compute all possible locations for nw corner for cell */ static int starts(int id, int shape, coor *NWS, struct cell *cells) { int i, n, top, bot, lhs, rhs; int rows, cols, left, above; /* size of cell */ rows = cells[id].alt[shape][0]; cols = cells[id].alt[shape][1]; /* the cells to the left and above */ left = cells[id].left; above = cells[id].above; /* if there is a vertical and horizontal dependence */ if ((left >= 0) && (above >= 0)) { top = cells[above].bot + 1; lhs = cells[left].rhs + 1; bot = top + rows; rhs = lhs + cols; /* if footprint of cell touches the cells to the left and above */ if ((top <= cells[left].bot) && (bot >= cells[left].top) && (lhs <= cells[above].rhs) && (rhs >= cells[above].lhs)) { n = 1; NWS[0][0] = top; NWS[0][1] = lhs; } else { n = 0; } /* if there is only a horizontal dependence */ } else if (left >= 0) { /* highest initial row is top of cell to the left - rows */ top = max(cells[left].top - rows + 1, 0); /* lowest initial row is bottom of cell to the left */ bot = min(cells[left].bot, ROWS); n = bot - top + 1; for (i = 0; i < n; i++) { NWS[i][0] = i + top; NWS[i][1] = cells[left].rhs + 1; } } else { /* leftmost initial col is lhs of cell above - cols */ lhs = max(cells[above].lhs - cols + 1, 0); /* rightmost initial col is rhs of cell above */ rhs = min(cells[above].rhs, COLS); n = rhs - lhs + 1; for (i = 0; i < n; i++) { NWS[i][0] = cells[above].bot + 1; NWS[i][1] = i + lhs; } } return (n); } /* lay the cell down on the board in the rectangular space defined by the cells top, bottom, left, and right edges. If the cell can not be layed down, return 0; else 1. 
*/
static int lay_down(int id, ibrd board, struct cell *cells) {
  int i, j, top, bot, lhs, rhs;

  top = cells[id].top;
  bot = cells[id].bot;
  lhs = cells[id].lhs;
  rhs = cells[id].rhs;

  /* claim every square in the rectangle; abort on the first collision */
  for (i = top; i <= bot; i++) {
    for (j = lhs; j <= rhs; j++) {
      if (board[i][j] == 0) board[i][j] = (char)id;
      else return(0);
    }
  }

  return (1);
}

/* read one integer from `file` into `var`, aborting the run on EOF */
#define read_integer(file,var) \
  if ( fscanf(file, "%d", &var) == EOF ) {\
    bots_message(" Bogus input file\n");\
    exit(-1);\
  }

/* read_inputs: parse the cell list from inputFile into gcells; cell 0 is a
 * sentinel.  An optional trailing integer is the known optimal solution. */
static void read_inputs() {
  int i, j, n;

  read_integer(inputFile,n);
  N = n;

  gcells = (struct cell *) malloc((n + 1) * sizeof(struct cell));

  /* sentinel cell 0: zero-area footprint just outside the board */
  gcells[0].n = 0;
  gcells[0].alt = 0;
  gcells[0].top = 0;
  gcells[0].bot = 0;
  gcells[0].lhs = -1;
  gcells[0].rhs = -1;
  gcells[0].left = 0;
  gcells[0].above = 0;
  gcells[0].next = 0;

  for (i = 1; i < n + 1; i++) {
    read_integer(inputFile, gcells[i].n);
    gcells[i].alt = (coor *) malloc(gcells[i].n * sizeof(coor));
    for (j = 0; j < gcells[i].n; j++) {
      read_integer(inputFile, gcells[i].alt[j][0]);
      read_integer(inputFile, gcells[i].alt[j][1]);
    }
    read_integer(inputFile, gcells[i].left);
    read_integer(inputFile, gcells[i].above);
    read_integer(inputFile, gcells[i].next);
  }

  if (!feof(inputFile)) {
    read_integer(inputFile, solution);
  }
}

/* write_outputs: print the minimum area and an ASCII rendering of the best
 * board (cell ids as letters, blanks for empty squares). */
static void write_outputs() {
  int i, j;

  bots_message("Minimum area = %d\n\n", MIN_AREA);

  for (i = 0; i < MIN_FOOTPRINT[0]; i++) {
    for (j = 0; j < MIN_FOOTPRINT[1]; j++) {
      if (BEST_BOARD[i][j] == 0) {bots_message(" ");}
      else bots_message("%c", 'A' + BEST_BOARD[i][j] - 1);
    }
    bots_message("\n");
  }
}

#ifdef MANUAL_CUTOFF
/* add_cell_ser: serial branch-and-bound placement of cell `id` and all its
 * successors; returns the number of nw-corner candidates explored. */
static int add_cell_ser (int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS) {
  int i, j, nn, nn2, area;

  ibrd board;
  coor footprint, NWS[DMAX];

  nn2 = 0;
  /* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
    /* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nn2 += nn;
    /* for all possible locations */
    for (j = 0; j < nn; j++) {
      struct cell *cells = CELLS;
      /* extent of shape */
      cells[id].top = NWS[j][0];
      cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
      cells[id].lhs = NWS[j][1];
      cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
      memcpy(board, BOARD, sizeof(ibrd));
      /* if the cell cannot be layed down, prune search */
      if (! lay_down(id, board, cells)) {
        bots_debug("Chip %d, shape %d does not fit\n", id, i);
        goto _end;
      }
      /* calculate new footprint of board and area of footprint */
      footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
      footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
      area = footprint[0] * footprint[1];
      /* if last cell */
      if (cells[id].next == 0) {
        /* if area is minimum, update global values (double-checked under
         * the critical section) */
        if (area < MIN_AREA) {
#pragma omp critical
          if (area < MIN_AREA) {
            MIN_AREA = area;
            MIN_FOOTPRINT[0] = footprint[0];
            MIN_FOOTPRINT[1] = footprint[1];
            memcpy(BEST_BOARD, board, sizeof(ibrd));
            bots_debug("N %d\n", MIN_AREA);
          }
        }
      /* if area is less than best area */
      } else if (area < MIN_AREA) {
        /* NOTE(review): `omp atomic` on a statement containing a function
         * call is not a conforming atomic form -- confirm against the
         * OpenMP spec / target compiler */
#pragma omp atomic
        nn2 += add_cell_ser(cells[id].next, footprint, board,cells);
      /* if area is greater than or equal to best area, prune search */
      } else {
        bots_debug("T %d, %d\n", area, MIN_AREA);
      }
_end:;
    }
  }
  return nn2;
}
#endif

#if defined(IF_CUTOFF)
/* add_cell (if-cutoff variant): one task per candidate position; tasks are
 * deferred only while level < bots_cutoff_value, via the task if() clause.
 * Each task works on private copies of the cell array and board. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int i, j, nn, area, nnc, nnl;

  ibrd board;
  coor footprint, NWS[DMAX];

  nnc = nnl = 0;
  /* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
    /* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
    /* for all possible locations */
    for (j = 0; j < nn; j++) {
#pragma omp task untied private(board, footprint,area) \
    firstprivate(NWS,i,j,id,nn,level) \
    shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) \
    if(level<bots_cutoff_value)
      {
        /* private copy of the cell metadata (VLA on the task's stack) */
        struct cell cells[N+1];
        memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
        /* extent of shape */
        cells[id].top = NWS[j][0];
        cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
        cells[id].lhs = NWS[j][1];
        cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
        memcpy(board, BOARD, sizeof(ibrd));
        /* if the cell cannot be layed down, prune search */
        if (! lay_down(id, board, cells)) {
          bots_debug("Chip %d, shape %d does not fit\n", id, i);
          goto _end;
        }
        /* calculate new footprint of board and area of footprint */
        footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
        footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
        area = footprint[0] * footprint[1];
        /* if last cell */
        if (cells[id].next == 0) {
          /* if area is minimum, update global values */
          if (area < MIN_AREA) {
#pragma omp critical
            if (area < MIN_AREA) {
              MIN_AREA = area;
              MIN_FOOTPRINT[0] = footprint[0];
              MIN_FOOTPRINT[1] = footprint[1];
              memcpy(BEST_BOARD, board, sizeof(ibrd));
              bots_debug("N %d\n", MIN_AREA);
            }
          }
        /* if area is less than best area */
        } else if (area < MIN_AREA) {
#pragma omp atomic
          nnc += add_cell(cells[id].next, footprint, board,cells,level+1);
        /* if area is greater than or equal to best area, prune search */
        } else {
          bots_debug("T %d, %d\n", area, MIN_AREA);
        }
_end:;
      }
    }
  }
#pragma omp taskwait
  return nnc+nnl;
}
#elif defined(FINAL_CUTOFF)
/* add_cell (final-cutoff variant): tasks become final below the cutoff; a
 * final task deep enough reuses the parent's cell array instead of copying. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int i, j, nn, area, nnc, nnl;

  coor footprint, NWS[DMAX];

  nnc = nnl = 0;
  /* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
    /* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
    /* for all possible locations */
    for (j = 0; j < nn; j++) {
#pragma omp task untied private(footprint,area) \
    firstprivate(NWS,i,j,id,nn,level,bots_cutoff_value) \
    shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) \
    final(level >= bots_cutoff_value)
      {
        ibrd board;
        struct cell *cells;
        /* included final tasks can safely share the parent's cells;
         * otherwise take a private alloca'd copy */
        if ( omp_in_final() && level > bots_cutoff_value ) {
          cells = CELLS;
        } else {
          cells = alloca(sizeof(struct cell)*(N+1));
          memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
        }
        /* extent of shape */
        cells[id].top = NWS[j][0];
        cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
        cells[id].lhs = NWS[j][1];
        cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1;
        memcpy(board, BOARD, sizeof(ibrd));
        /* if the cell cannot be layed down, prune search */
        if (! lay_down(id, board, cells)) {
          bots_debug("Chip %d, shape %d does not fit\n", id, i);
          goto _end;
        }
        /* calculate new footprint of board and area of footprint */
        footprint[0] = max(FOOTPRINT[0], cells[id].bot+1);
        footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1);
        area = footprint[0] * footprint[1];
        /* if last cell */
        if (cells[id].next == 0) {
          /* if area is minimum, update global values */
          if (area < MIN_AREA) {
#pragma omp critical
            if (area < MIN_AREA) {
              MIN_AREA = area;
              MIN_FOOTPRINT[0] = footprint[0];
              MIN_FOOTPRINT[1] = footprint[1];
              memcpy(BEST_BOARD, board, sizeof(ibrd));
              bots_debug("N %d\n", MIN_AREA);
            }
          }
        /* if area is less than best area */
        } else if (area < MIN_AREA) {
#pragma omp atomic
          nnc += add_cell(cells[id].next, footprint, board,cells,level+1);
        /* if area is greater than or equal to best area, prune search */
        } else {
          bots_debug("T %d, %d\n", area, MIN_AREA);
        }
_end:;
      }
    }
  }
#pragma omp taskwait
  return nnc+nnl;
}
#elif defined(MANUAL_CUTOFF)
/* add_cell (manual-cutoff variant): spawns tasks until level reaches
 * bots_cutoff_value, then recurses into the serial add_cell_ser. */
static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS,int level) {
  int i, j, nn, area, nnc, nnl;

  ibrd board;
  coor footprint, NWS[DMAX];

  nnc = nnl = 0;
  /* for each possible shape */
  for (i = 0; i < CELLS[id].n; i++) {
    /* compute all possible locations for nw corner */
    nn = starts(id, i, NWS, CELLS);
    nnl += nn;
    /* for all possible locations */
    for (j = 0; j < nn; j++) {
#pragma omp task untied private(board, footprint,area) \
    firstprivate(NWS,i,j,id,nn,level,bots_cutoff_value) shared(nnc) \
    shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,bots_verbose_mode)
      {
        struct cell *cells;
        cells = alloca(sizeof(struct cell)*(N+1));
        memcpy(cells,CELLS,sizeof(struct cell)*(N+1));
        /* extent of shape */
        cells[id].top = NWS[j][0];
        cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1;
        cells[id].lhs = NWS[j][1];
cells[id].rhs = cells[id].lhs + cells[id].alt[i][1] - 1; memcpy(board, BOARD, sizeof(ibrd)); /* if the cell cannot be layed down, prune search */ if (! lay_down(id, board, cells)) { bots_debug("Chip %d, shape %d does not fit\n", id, i); goto _end; } /* calculate new footprint of board and area of footprint */ footprint[0] = max(FOOTPRINT[0], cells[id].bot+1); footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1); area = footprint[0] * footprint[1]; /* if last cell */ if (cells[id].next == 0) { /* if area is minimum, update global values */ if (area < MIN_AREA) { #pragma omp critical if (area < MIN_AREA) { MIN_AREA = area; MIN_FOOTPRINT[0] = footprint[0]; MIN_FOOTPRINT[1] = footprint[1]; memcpy(BEST_BOARD, board, sizeof(ibrd)); bots_debug("N %d\n", MIN_AREA); } } /* if area is less than best area */ } else if (area < MIN_AREA) { if(level+1 < bots_cutoff_value ) { #pragma omp atomic nnc += add_cell(cells[id].next, footprint, board,cells,level+1); } else { #pragma omp atomic nnc += add_cell_ser(cells[id].next, footprint, board,cells); } /* if area is greater than or equal to best area, prune search */ } else { bots_debug("T %d, %d\n", area, MIN_AREA); } _end:; } } } #pragma omp taskwait return nnc+nnl; } #else static int add_cell(int id, coor FOOTPRINT, ibrd BOARD, struct cell *CELLS) { int i, j, nn, area, nnc,nnl; ibrd board; coor footprint, NWS[DMAX]; nnc = nnl = 0; /* for each possible shape */ for (i = 0; i < CELLS[id].n; i++) { /* compute all possible locations for nw corner */ nn = starts(id, i, NWS, CELLS); nnl += nn; /* for all possible locations */ for (j = 0; j < nn; j++) { #pragma omp task untied private(board, footprint,area) \ firstprivate(NWS,i,j,id,nn) \ shared(FOOTPRINT,BOARD,CELLS,MIN_AREA,MIN_FOOTPRINT,N,BEST_BOARD,nnc,bots_verbose_mode) { struct cell cells[N+1]; memcpy(cells,CELLS,sizeof(struct cell)*(N+1)); /* extent of shape */ cells[id].top = NWS[j][0]; cells[id].bot = cells[id].top + cells[id].alt[i][0] - 1; cells[id].lhs = NWS[j][1]; cells[id].rhs 
= cells[id].lhs + cells[id].alt[i][1] - 1; memcpy(board, BOARD, sizeof(ibrd)); /* if the cell cannot be layed down, prune search */ if (! lay_down(id, board, cells)) { bots_debug("Chip %d, shape %d does not fit\n", id, i); goto _end; } /* calculate new footprint of board and area of footprint */ footprint[0] = max(FOOTPRINT[0], cells[id].bot+1); footprint[1] = max(FOOTPRINT[1], cells[id].rhs+1); area = footprint[0] * footprint[1]; /* if last cell */ if (cells[id].next == 0) { /* if area is minimum, update global values */ if (area < MIN_AREA) { #pragma omp critical if (area < MIN_AREA) { MIN_AREA = area; MIN_FOOTPRINT[0] = footprint[0]; MIN_FOOTPRINT[1] = footprint[1]; memcpy(BEST_BOARD, board, sizeof(ibrd)); bots_debug("N %d\n", MIN_AREA); } } /* if area is less than best area */ } else if (area < MIN_AREA) { #pragma omp atomic nnc += add_cell(cells[id].next, footprint, board,cells); /* if area is greater than or equal to best area, prune search */ } else { bots_debug("T %d, %d\n", area, MIN_AREA); } _end:; } } } #pragma omp taskwait return nnc+nnl; } #endif ibrd board; void floorplan_init (char *filename) { int i,j; inputFile = fopen(filename, "r"); if(NULL == inputFile) { bots_message("Couldn't open %s file for reading\n", filename); exit(1); } /* read input file and initialize global minimum area */ read_inputs(); MIN_AREA = ROWS * COLS; /* initialize board is empty */ for (i = 0; i < ROWS; i++) for (j = 0; j < COLS; j++) board[i][j] = 0; } void compute_floorplan (void) { coor footprint; /* footprint of initial board is zero */ footprint[0] = 0; footprint[1] = 0; bots_message("Computing floorplan "); #pragma omp parallel { #pragma omp single #if defined(MANUAL_CUTOFF) || defined(IF_CUTOFF) || defined(FINAL_CUTOFF) bots_number_of_tasks = add_cell(1, footprint, board, gcells,0); #else bots_number_of_tasks = add_cell(1, footprint, board, gcells); #endif } bots_message(" completed!\n"); } void floorplan_end (void) { /* write results */ write_outputs(); } int 
floorplan_verify (void)
{
   /* No reference solution recorded for this input: nothing to check. */
   if (solution == -1)
      return BOTS_RESULT_NA;
   /* Otherwise the search must have found exactly the known optimal area. */
   if (MIN_AREA == solution)
      return BOTS_RESULT_SUCCESSFUL;
   return BOTS_RESULT_UNSUCCESSFUL;
}
GB_unop__identity_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// (review note) Auto-generated kernel for the IDENTITY unary op specialized to
// fp64 -> fp64.  Only newlines (destroyed in transit) and comments were
// restored here; change the Generator/ template for real edits.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_fp64_fp64)

// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// (review note) the apply kernel below is compiled out with '#if 0'; the
// generator names it "(none)" — presumably the identity apply is handled by a
// plain copy elsewhere; confirm against the Generator template.
#if 0

GrB_Info GB (_unop_apply__(none))
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose loop body is textually included here
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
JeeIOrbitalSoA.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory ////////////////////////////////////////////////////////////////////////////////////// #ifndef QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H #define QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H #include "Configuration.h" #if !defined(QMC_BUILD_SANDBOX_ONLY) #include "QMCWaveFunctions/WaveFunctionComponent.h" #endif #include "Particle/DistanceTableData.h" #include "CPU/SIMD/aligned_allocator.hpp" #include "CPU/SIMD/algorithm.hpp" #include <map> #include <numeric> namespace qmcplusplus { /** @ingroup WaveFunctionComponent * @brief Specialization for three-body Jastrow function using multiple functors * *Each pair-type can have distinct function \f$u(r_{ij})\f$. *For electrons, distinct pair correlation functions are used *for spins up-up/down-down and up-down/down-up. 
*/ template<class FT> class JeeIOrbitalSoA : public WaveFunctionComponent { ///type of each component U, dU, d2U; using valT = typename FT::real_type; ///element position type using posT = TinyVector<valT, OHMMS_DIM>; ///use the same container using DistRow = DistanceTableData::DistRow; using DisplRow = DistanceTableData::DisplRow; ///table index for el-el const int ee_Table_ID_; ///table index for i-el const int ei_Table_ID_; //nuber of particles int Nelec, Nion; ///number of particles + padded size_t Nelec_padded; //number of groups of the target particleset int eGroups, iGroups; ///reference to the sources (ions) const ParticleSet& Ions; ///diff value RealType DiffVal; ///\f$Uat[i] = sum_(j) u_{i,j}\f$ Vector<valT> Uat, oldUk, newUk; ///\f$dUat[i] = sum_(j) du_{i,j}\f$ using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>; gContainer_type dUat, olddUk, newdUk; ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$ Vector<valT> d2Uat, oldd2Uk, newd2Uk; /// current values during PbyP valT cur_Uat, cur_d2Uat; posT cur_dUat, dUat_temp; ///container for the Jastrow functions Array<FT*, 3> F; std::map<std::string, FT*> J3Unique; //YYYY std::map<FT*, int> J3UniqueIndex; /// the cutoff for e-I pairs std::vector<valT> Ion_cutoff; /// the electrons around ions within the cutoff radius, grouped by species Array<std::vector<int>, 2> elecs_inside; Array<std::vector<valT>, 2> elecs_inside_dist; Array<std::vector<posT>, 2> elecs_inside_displ; /// the ids of ions within the cutoff radius of an electron on which a move is proposed std::vector<int> ions_nearby_old, ions_nearby_new; /// work buffer size size_t Nbuffer; /// compressed distances aligned_vector<valT> Distjk_Compressed, DistkI_Compressed, DistjI_Compressed; std::vector<int> DistIndice_k; /// compressed displacements gContainer_type Disp_jk_Compressed, Disp_jI_Compressed, Disp_kI_Compressed; /// work result buffer VectorSoaContainer<valT, 9> mVGL; // Used for evaluating derivatives with respect to the parameters int NumVars; 
Array<std::pair<int, int>, 3> VarOffset; Vector<RealType> dLogPsi; Array<PosType, 2> gradLogPsi; Array<RealType, 2> lapLogPsi; // Temporary store for parameter derivatives of functor // The first index is the functor index in J3Unique. The second is the parameter index w.r.t. to that // functor std::vector<std::vector<RealType>> du_dalpha; std::vector<std::vector<PosType>> dgrad_dalpha; std::vector<std::vector<Tensor<RealType, 3>>> dhess_dalpha; public: ///alias FuncType using FuncType = FT; JeeIOrbitalSoA(const std::string& obj_name, const ParticleSet& ions, ParticleSet& elecs, bool is_master = false) : WaveFunctionComponent("JeeIOrbitalSoA", obj_name), ee_Table_ID_(elecs.addTable(elecs)), ei_Table_ID_(elecs.addTable(ions, true)), Ions(ions), NumVars(0) { if (myName.empty()) throw std::runtime_error("JeeIOrbitalSoA object name cannot be empty!"); init(elecs); } ~JeeIOrbitalSoA() {} WaveFunctionComponentPtr makeClone(ParticleSet& elecs) const { JeeIOrbitalSoA<FT>* eeIcopy = new JeeIOrbitalSoA<FT>(myName, Ions, elecs, false); std::map<const FT*, FT*> fcmap; for (int iG = 0; iG < iGroups; iG++) for (int eG1 = 0; eG1 < eGroups; eG1++) for (int eG2 = 0; eG2 < eGroups; eG2++) { if (F(iG, eG1, eG2) == 0) continue; typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F(iG, eG1, eG2)); if (fit == fcmap.end()) { FT* fc = new FT(*F(iG, eG1, eG2)); eeIcopy->addFunc(iG, eG1, eG2, fc); fcmap[F(iG, eG1, eG2)] = fc; } } // Ye: I don't like the following memory allocated by default. 
eeIcopy->myVars.clear(); eeIcopy->myVars.insertFrom(myVars); eeIcopy->NumVars = NumVars; eeIcopy->dLogPsi.resize(NumVars); eeIcopy->gradLogPsi.resize(NumVars, Nelec); eeIcopy->lapLogPsi.resize(NumVars, Nelec); eeIcopy->VarOffset = VarOffset; eeIcopy->Optimizable = Optimizable; return eeIcopy; } void init(ParticleSet& p) { Nelec = p.getTotalNum(); Nelec_padded = getAlignedSize<valT>(Nelec); Nion = Ions.getTotalNum(); iGroups = Ions.getSpeciesSet().getTotalNum(); eGroups = p.groups(); Uat.resize(Nelec); dUat.resize(Nelec); d2Uat.resize(Nelec); oldUk.resize(Nelec); olddUk.resize(Nelec); oldd2Uk.resize(Nelec); newUk.resize(Nelec); newdUk.resize(Nelec); newd2Uk.resize(Nelec); F.resize(iGroups, eGroups, eGroups); F = nullptr; elecs_inside.resize(eGroups, Nion); elecs_inside_dist.resize(eGroups, Nion); elecs_inside_displ.resize(eGroups, Nion); ions_nearby_old.resize(Nion); ions_nearby_new.resize(Nion); Ion_cutoff.resize(Nion, 0.0); //initialize buffers Nbuffer = Nelec; mVGL.resize(Nbuffer); Distjk_Compressed.resize(Nbuffer); DistjI_Compressed.resize(Nbuffer); DistkI_Compressed.resize(Nbuffer); Disp_jk_Compressed.resize(Nbuffer); Disp_jI_Compressed.resize(Nbuffer); Disp_kI_Compressed.resize(Nbuffer); DistIndice_k.resize(Nbuffer); } void initUnique() { typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); du_dalpha.resize(J3Unique.size()); dgrad_dalpha.resize(J3Unique.size()); dhess_dalpha.resize(J3Unique.size()); int ifunc = 0; while (it != it_end) { J3UniqueIndex[it->second] = ifunc; FT& functor = *(it->second); int numParams = functor.getNumParameters(); du_dalpha[ifunc].resize(numParams); dgrad_dalpha[ifunc].resize(numParams); dhess_dalpha[ifunc].resize(numParams); ++it; ifunc++; } } void addFunc(int iSpecies, int eSpecies1, int eSpecies2, FT* j) { if (eSpecies1 == eSpecies2) { //if only up-up is specified, assume spin-unpolarized correlations if (eSpecies1 == 0) for (int eG1 = 0; eG1 < eGroups; eG1++) for (int eG2 = 0; eG2 < 
eGroups; eG2++) { if (F(iSpecies, eG1, eG2) == 0) F(iSpecies, eG1, eG2) = j; } } else { F(iSpecies, eSpecies1, eSpecies2) = j; F(iSpecies, eSpecies2, eSpecies1) = j; } if (j) { RealType rcut = 0.5 * j->cutoff_radius; for (int i = 0; i < Nion; i++) if (Ions.GroupID[i] == iSpecies) Ion_cutoff[i] = rcut; } else { APP_ABORT("JeeIOrbitalSoA::addFunc Jastrow function pointer is NULL"); } std::stringstream aname; aname << iSpecies << "_" << eSpecies1 << "_" << eSpecies2; J3Unique[aname.str()] = j; initUnique(); } /** check that correlation information is complete */ void check_complete() { //check that correlation pointers are either all 0 or all assigned bool complete = true; for (int i = 0; i < iGroups; ++i) { int nfilled = 0; bool partial; for (int e1 = 0; e1 < eGroups; ++e1) for (int e2 = 0; e2 < eGroups; ++e2) if (F(i, e1, e2) != 0) nfilled++; partial = nfilled > 0 && nfilled < eGroups * eGroups; if (partial) app_log() << "J3 eeI is missing correlation for ion " << i << std::endl; complete = complete && !partial; } if (!complete) { APP_ABORT("JeeIOrbitalSoA::check_complete J3 eeI is missing correlation components\n see preceding messages " "for details"); } //first set radii for (int i = 0; i < Nion; ++i) { FT* f = F(Ions.GroupID[i], 0, 0); if (f != 0) Ion_cutoff[i] = .5 * f->cutoff_radius; } //then check radii bool all_radii_match = true; for (int i = 0; i < iGroups; ++i) { if (F(i, 0, 0) != 0) { bool radii_match = true; RealType rcut = F(i, 0, 0)->cutoff_radius; for (int e1 = 0; e1 < eGroups; ++e1) for (int e2 = 0; e2 < eGroups; ++e2) radii_match = radii_match && F(i, e1, e2)->cutoff_radius == rcut; if (!radii_match) app_log() << "eeI functors for ion species " << i << " have different radii" << std::endl; all_radii_match = all_radii_match && radii_match; } } if (!all_radii_match) { APP_ABORT("JeeIOrbitalSoA::check_radii J3 eeI are inconsistent for some ion species\n see preceding messages " "for details"); } } /** check in an optimizable parameter * @param o a 
super set of optimizable variables */ void checkInVariables(opt_variables_type& active) { myVars.clear(); typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); while (it != it_end) { (*it).second->checkInVariables(active); (*it).second->checkInVariables(myVars); ++it; } } /** check out optimizable variables */ void checkOutVariables(const opt_variables_type& active) { myVars.clear(); typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); while (it != it_end) { (*it).second->myVars.getIndex(active); myVars.insertFrom((*it).second->myVars); ++it; } myVars.getIndex(active); NumVars = myVars.size(); if (NumVars) { dLogPsi.resize(NumVars); gradLogPsi.resize(NumVars, Nelec); lapLogPsi.resize(NumVars, Nelec); VarOffset.resize(iGroups, eGroups, eGroups); int varoffset = myVars.Index[0]; for (int ig = 0; ig < iGroups; ig++) for (int jg = 0; jg < eGroups; jg++) for (int kg = 0; kg < eGroups; kg++) { FT* func_ijk = F(ig, jg, kg); if (func_ijk == nullptr) continue; VarOffset(ig, jg, kg).first = func_ijk->myVars.Index.front() - varoffset; VarOffset(ig, jg, kg).second = func_ijk->myVars.Index.size() + VarOffset(ig, jg, kg).first; } } } ///reset the value of all the unique Two-Body Jastrow functions void resetParameters(const opt_variables_type& active) { if (!Optimizable) return; typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); while (it != it_end) { (*it++).second->resetParameters(active); } for (int i = 0; i < myVars.size(); ++i) { int ii = myVars.Index[i]; if (ii >= 0) myVars[i] = active[ii]; } } /** print the state, e.g., optimizables */ void reportStatus(std::ostream& os) { typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); while (it != it_end) { (*it).second->myVars.print(os); ++it; } } void build_compact_list(ParticleSet& P) { const auto& eI_dists = P.getDistTable(ei_Table_ID_).getDistances(); const auto& eI_displs 
= P.getDistTable(ei_Table_ID_).getDisplacements(); for (int iat = 0; iat < Nion; ++iat) for (int jg = 0; jg < eGroups; ++jg) { elecs_inside(jg, iat).clear(); elecs_inside_dist(jg, iat).clear(); elecs_inside_displ(jg, iat).clear(); } for (int jg = 0; jg < eGroups; ++jg) for (int jel = P.first(jg); jel < P.last(jg); jel++) for (int iat = 0; iat < Nion; ++iat) if (eI_dists[jel][iat] < Ion_cutoff[iat]) { elecs_inside(jg, iat).push_back(jel); elecs_inside_dist(jg, iat).push_back(eI_dists[jel][iat]); elecs_inside_displ(jg, iat).push_back(eI_displs[jel][iat]); } } LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L) { return evaluateGL(P, G, L, true); } PsiValueType ratio(ParticleSet& P, int iat) { UpdateMode = ORB_PBYP_RATIO; const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); cur_Uat = computeU(P, iat, P.GroupID[iat], eI_table.getTempDists(), ee_table.getTempDists(), ions_nearby_new); DiffVal = Uat[iat] - cur_Uat; return std::exp(static_cast<PsiValueType>(DiffVal)); } void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios) { for (int k = 0; k < ratios.size(); ++k) ratios[k] = std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.refPS.GroupID[VP.refPtcl], VP.getDistTable(ei_Table_ID_).getDistRow(k), VP.getDistTable(ee_Table_ID_).getDistRow(k), ions_nearby_old)); } void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios) { const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const auto& eI_dists = eI_table.getDistances(); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); for (int jg = 0; jg < eGroups; ++jg) { const valT sumU = computeU(P, -1, jg, eI_table.getTempDists(), ee_table.getTempDists(), ions_nearby_new); for (int j = P.first(jg); j < P.last(jg); ++j) { // remove self-interaction valT Uself(0); for (int iat = 0; iat < Nion; ++iat) { const valT& 
r_Ij = eI_table.getTempDists()[iat]; const valT& r_Ik = eI_dists[j][iat]; if (r_Ij < Ion_cutoff[iat] && r_Ik < Ion_cutoff[iat]) { const int ig = Ions.GroupID[iat]; Uself += F(ig, jg, jg)->evaluate(ee_table.getTempDists()[j], r_Ij, r_Ik); } } ratios[j] = std::exp(Uat[j] + Uself - sumU); } } } GradType evalGrad(ParticleSet& P, int iat) { return GradType(dUat[iat]); } PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat) { UpdateMode = ORB_PBYP_PARTIAL; const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); computeU3(P, iat, eI_table.getTempDists(), eI_table.getTempDispls(), ee_table.getTempDists(), ee_table.getTempDispls(), cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new); DiffVal = Uat[iat] - cur_Uat; grad_iat += cur_dUat; return std::exp(static_cast<PsiValueType>(DiffVal)); } inline void restore(int iat) {} void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false) { const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); // get the old value, grad, lapl computeU3(P, iat, eI_table.getDistRow(iat), eI_table.getDisplRow(iat), ee_table.getOldDists(), ee_table.getOldDispls(), Uat[iat], dUat_temp, d2Uat[iat], oldUk, olddUk, oldd2Uk, ions_nearby_old); if (UpdateMode == ORB_PBYP_RATIO) { //ratio-only during the move; need to compute derivatives computeU3(P, iat, eI_table.getTempDists(), eI_table.getTempDispls(), ee_table.getTempDists(), ee_table.getTempDispls(), cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new); } #pragma omp simd for (int jel = 0; jel < Nelec; jel++) { Uat[jel] += newUk[jel] - oldUk[jel]; d2Uat[jel] += newd2Uk[jel] - oldd2Uk[jel]; } for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict save_g = dUat.data(idim); const valT* restrict new_g = newdUk.data(idim); const valT* restrict old_g = olddUk.data(idim); #pragma omp simd 
aligned(save_g, new_g, old_g) for (int jel = 0; jel < Nelec; jel++) save_g[jel] += new_g[jel] - old_g[jel]; } LogValue += Uat[iat] - cur_Uat; Uat[iat] = cur_Uat; dUat(iat) = cur_dUat; d2Uat[iat] = cur_d2Uat; const int ig = P.GroupID[iat]; // update compact list elecs_inside // if the old position exists in elecs_inside for (int iind = 0; iind < ions_nearby_old.size(); iind++) { int jat = ions_nearby_old[iind]; auto iter = std::find(elecs_inside(ig, jat).begin(), elecs_inside(ig, jat).end(), iat); auto iter_dist = elecs_inside_dist(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter); auto iter_displ = elecs_inside_displ(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter); // sentinel code #ifndef NDEBUG if (iter == elecs_inside(ig, jat).end()) { std::cerr << std::setprecision(std::numeric_limits<valT>::digits10 + 1) << "updating electron iat = " << iat << " near ion " << jat << " dist " << eI_table.getDistRow(iat)[jat] << std::endl; throw std::runtime_error("BUG electron not found in elecs_inside"); } else if (std::abs(eI_table.getDistRow(iat)[jat] - *iter_dist) >= std::numeric_limits<valT>::epsilon()) { std::cerr << std::setprecision(std::numeric_limits<valT>::digits10 + 1) << "inconsistent electron iat = " << iat << " near ion " << jat << " dist " << eI_table.getDistRow(iat)[jat] << " stored value = " << *iter_dist << std::endl; throw std::runtime_error("BUG eI distance stored value elecs_inside_dist not matching distance table"); } #endif if (eI_table.getTempDists()[jat] < Ion_cutoff[jat]) // the new position is still inside { *iter_dist = eI_table.getTempDists()[jat]; *iter_displ = eI_table.getTempDispls()[jat]; *std::find(ions_nearby_new.begin(), ions_nearby_new.end(), jat) = -1; } else { *iter = elecs_inside(ig, jat).back(); elecs_inside(ig, jat).pop_back(); *iter_dist = elecs_inside_dist(ig, jat).back(); elecs_inside_dist(ig, jat).pop_back(); *iter_displ = elecs_inside_displ(ig, jat).back(); elecs_inside_displ(ig, 
jat).pop_back(); } } // if the old position doesn't exist in elecs_inside but the new position do for (int iind = 0; iind < ions_nearby_new.size(); iind++) { int jat = ions_nearby_new[iind]; if (jat >= 0) { elecs_inside(ig, jat).push_back(iat); elecs_inside_dist(ig, jat).push_back(eI_table.getTempDists()[jat]); elecs_inside_displ(ig, jat).push_back(eI_table.getTempDispls()[jat]); } } } inline void recompute(ParticleSet& P) { const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); build_compact_list(P); for (int jel = 0; jel < Nelec; ++jel) { computeU3(P, jel, eI_table.getDistRow(jel), eI_table.getDisplRow(jel), ee_table.getDistRow(jel), ee_table.getDisplRow(jel), Uat[jel], dUat_temp, d2Uat[jel], newUk, newdUk, newd2Uk, ions_nearby_new, true); dUat(jel) = dUat_temp; // add the contribution from the upper triangle #pragma omp simd for (int kel = 0; kel < jel; kel++) { Uat[kel] += newUk[kel]; d2Uat[kel] += newd2Uk[kel]; } for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict save_g = dUat.data(idim); const valT* restrict new_g = newdUk.data(idim); #pragma omp simd aligned(save_g, new_g) for (int kel = 0; kel < jel; kel++) save_g[kel] += new_g[kel]; } } } inline valT computeU(const ParticleSet& P, int jel, int jg, const DistRow& distjI, const DistRow& distjk, std::vector<int>& ions_nearby) { ions_nearby.clear(); for (int iat = 0; iat < Nion; ++iat) if (distjI[iat] < Ion_cutoff[iat]) ions_nearby.push_back(iat); valT Uj = valT(0); for (int kg = 0; kg < eGroups; ++kg) { int kel_counter = 0; for (int iind = 0; iind < ions_nearby.size(); ++iind) { const int iat = ions_nearby[iind]; const int ig = Ions.GroupID[iat]; const valT r_jI = distjI[iat]; for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++) { const int kel = elecs_inside(kg, iat)[kind]; if (kel != jel) { DistkI_Compressed[kel_counter] = elecs_inside_dist(kg, iat)[kind]; Distjk_Compressed[kel_counter] = distjk[kel]; 
DistjI_Compressed[kel_counter] = r_jI; kel_counter++; if (kel_counter == Nbuffer) { const FT& feeI(*F(ig, jg, kg)); Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data()); kel_counter = 0; } } } if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0) { const FT& feeI(*F(ig, jg, kg)); Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data()); kel_counter = 0; } } } return Uj; } inline void computeU3_engine(const ParticleSet& P, const FT& feeI, int kel_counter, valT& Uj, posT& dUj, valT& d2Uj, Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk) { constexpr valT czero(0); constexpr valT cone(1); constexpr valT ctwo(2); constexpr valT lapfac = OHMMS_DIM - cone; valT* restrict val = mVGL.data(0); valT* restrict gradF0 = mVGL.data(1); valT* restrict gradF1 = mVGL.data(2); valT* restrict gradF2 = mVGL.data(3); valT* restrict hessF00 = mVGL.data(4); valT* restrict hessF11 = mVGL.data(5); valT* restrict hessF22 = mVGL.data(6); valT* restrict hessF01 = mVGL.data(7); valT* restrict hessF02 = mVGL.data(8); feeI.evaluateVGL(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data(), val, gradF0, gradF1, gradF2, hessF00, hessF11, hessF22, hessF01, hessF02); // compute the contribution to jel, kel Uj = simd::accumulate_n(val, kel_counter, Uj); valT gradF0_sum = simd::accumulate_n(gradF0, kel_counter, czero); valT gradF1_sum = simd::accumulate_n(gradF1, kel_counter, czero); valT hessF00_sum = simd::accumulate_n(hessF00, kel_counter, czero); valT hessF11_sum = simd::accumulate_n(hessF11, kel_counter, czero); d2Uj -= hessF00_sum + hessF11_sum + lapfac * (gradF0_sum + gradF1_sum); std::fill_n(hessF11, kel_counter, czero); for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict jk = Disp_jk_Compressed.data(idim); valT* restrict jI = Disp_jI_Compressed.data(idim); valT* restrict kI = 
Disp_kI_Compressed.data(idim); valT dUj_x(0); #pragma omp simd aligned(gradF0, gradF1, gradF2, hessF11, jk, jI, kI) reduction(+ : dUj_x) for (int kel_index = 0; kel_index < kel_counter; kel_index++) { // recycle hessF11 hessF11[kel_index] += kI[kel_index] * jk[kel_index]; dUj_x += gradF1[kel_index] * jI[kel_index]; // destroy jk, kI const valT temp = jk[kel_index] * gradF0[kel_index]; dUj_x += temp; jk[kel_index] *= jI[kel_index]; kI[kel_index] = kI[kel_index] * gradF2[kel_index] - temp; } dUj[idim] += dUj_x; valT* restrict jk0 = Disp_jk_Compressed.data(0); if (idim > 0) { #pragma omp simd aligned(jk, jk0) for (int kel_index = 0; kel_index < kel_counter; kel_index++) jk0[kel_index] += jk[kel_index]; } valT* restrict dUk_x = dUk.data(idim); for (int kel_index = 0; kel_index < kel_counter; kel_index++) dUk_x[DistIndice_k[kel_index]] += kI[kel_index]; } valT sum(0); valT* restrict jk0 = Disp_jk_Compressed.data(0); #pragma omp simd aligned(jk0, hessF01) reduction(+ : sum) for (int kel_index = 0; kel_index < kel_counter; kel_index++) sum += hessF01[kel_index] * jk0[kel_index]; d2Uj -= ctwo * sum; #pragma omp simd aligned(hessF00, hessF22, gradF0, gradF2, hessF02, hessF11) for (int kel_index = 0; kel_index < kel_counter; kel_index++) hessF00[kel_index] = hessF00[kel_index] + hessF22[kel_index] + lapfac * (gradF0[kel_index] + gradF2[kel_index]) - ctwo * hessF02[kel_index] * hessF11[kel_index]; for (int kel_index = 0; kel_index < kel_counter; kel_index++) { const int kel = DistIndice_k[kel_index]; Uk[kel] += val[kel_index]; d2Uk[kel] -= hessF00[kel_index]; } } inline void computeU3(const ParticleSet& P, int jel, const DistRow& distjI, const DisplRow& displjI, const DistRow& distjk, const DisplRow& displjk, valT& Uj, posT& dUj, valT& d2Uj, Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk, std::vector<int>& ions_nearby, bool triangle = false) { constexpr valT czero(0); Uj = czero; dUj = posT(); d2Uj = czero; const int jg = P.GroupID[jel]; const int kelmax = 
triangle ? jel : Nelec; std::fill_n(Uk.data(), kelmax, czero); std::fill_n(d2Uk.data(), kelmax, czero); for (int idim = 0; idim < OHMMS_DIM; ++idim) std::fill_n(dUk.data(idim), kelmax, czero); ions_nearby.clear(); for (int iat = 0; iat < Nion; ++iat) if (distjI[iat] < Ion_cutoff[iat]) ions_nearby.push_back(iat); for (int kg = 0; kg < eGroups; ++kg) { int kel_counter = 0; for (int iind = 0; iind < ions_nearby.size(); ++iind) { const int iat = ions_nearby[iind]; const int ig = Ions.GroupID[iat]; const valT r_jI = distjI[iat]; const posT disp_Ij = displjI[iat]; for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++) { const int kel = elecs_inside(kg, iat)[kind]; if (kel < kelmax && kel != jel) { DistkI_Compressed[kel_counter] = elecs_inside_dist(kg, iat)[kind]; DistjI_Compressed[kel_counter] = r_jI; Distjk_Compressed[kel_counter] = distjk[kel]; Disp_kI_Compressed(kel_counter) = elecs_inside_displ(kg, iat)[kind]; Disp_jI_Compressed(kel_counter) = disp_Ij; Disp_jk_Compressed(kel_counter) = displjk[kel]; DistIndice_k[kel_counter] = kel; kel_counter++; if (kel_counter == Nbuffer) { const FT& feeI(*F(ig, jg, kg)); computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk); kel_counter = 0; } } } if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0) { const FT& feeI(*F(ig, jg, kg)); computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk); kel_counter = 0; } } } } inline void registerData(ParticleSet& P, WFBufferType& buf) { if (Bytes_in_WFBuffer == 0) { Bytes_in_WFBuffer = buf.current(); buf.add(Uat.begin(), Uat.end()); buf.add(dUat.data(), dUat.end()); buf.add(d2Uat.begin(), d2Uat.end()); Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer; // free local space Uat.free(); dUat.free(); d2Uat.free(); } else { buf.forward(Bytes_in_WFBuffer); } } inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) { evaluateGL(P, P.G, P.L, false); 
buf.forward(Bytes_in_WFBuffer); return LogValue; } inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf) { Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec); dUat.attachReference(Nelec, Nelec_padded, buf.lendReference<valT>(Nelec_padded * OHMMS_DIM)); d2Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec); build_compact_list(P); } LogValueType evaluateGL(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L, bool fromscratch = false) { if (fromscratch) recompute(P); LogValue = valT(0); for (int iat = 0; iat < Nelec; ++iat) { LogValue += Uat[iat]; G[iat] += dUat[iat]; L[iat] += d2Uat[iat]; } return LogValue = -LogValue * 0.5; } void evaluateDerivatives(ParticleSet& P, const opt_variables_type& optvars, std::vector<ValueType>& dlogpsi, std::vector<ValueType>& dhpsioverpsi) { bool recalculate(false); std::vector<bool> rcsingles(myVars.size(), false); for (int k = 0; k < myVars.size(); ++k) { int kk = myVars.where(k); if (kk < 0) continue; if (optvars.recompute(kk)) recalculate = true; rcsingles[k] = true; } if (recalculate) { constexpr valT czero(0); constexpr valT cone(1); constexpr valT cminus(-1); constexpr valT ctwo(2); constexpr valT lapfac = OHMMS_DIM - cone; const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); const auto& ee_dists = ee_table.getDistances(); const auto& ee_displs = ee_table.getDisplacements(); build_compact_list(P); dLogPsi = czero; gradLogPsi = PosType(); lapLogPsi = czero; for (int iat = 0; iat < Nion; ++iat) { const int ig = Ions.GroupID[iat]; for (int jg = 0; jg < eGroups; ++jg) for (int jind = 0; jind < elecs_inside(jg, iat).size(); jind++) { const int jel = elecs_inside(jg, iat)[jind]; const valT r_Ij = elecs_inside_dist(jg, iat)[jind]; const posT disp_Ij = cminus * elecs_inside_displ(jg, iat)[jind]; const valT r_Ij_inv = cone / r_Ij; for (int kg = 0; kg < eGroups; ++kg) for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++) { const int kel = elecs_inside(kg, 
iat)[kind]; if (kel < jel) { const valT r_Ik = elecs_inside_dist(kg, iat)[kind]; const posT disp_Ik = cminus * elecs_inside_displ(kg, iat)[kind]; const valT r_Ik_inv = cone / r_Ik; const valT r_jk = ee_dists[jel][kel]; const posT disp_jk = ee_displs[jel][kel]; const valT r_jk_inv = cone / r_jk; FT& func = *F(ig, jg, kg); int idx = J3UniqueIndex[F(ig, jg, kg)]; func.evaluateDerivatives(r_jk, r_Ij, r_Ik, du_dalpha[idx], dgrad_dalpha[idx], dhess_dalpha[idx]); int first = VarOffset(ig, jg, kg).first; int last = VarOffset(ig, jg, kg).second; std::vector<RealType>& dlog = du_dalpha[idx]; std::vector<PosType>& dgrad = dgrad_dalpha[idx]; std::vector<Tensor<RealType, 3>>& dhess = dhess_dalpha[idx]; for (int p = first, ip = 0; p < last; p++, ip++) { RealType& dval = dlog[ip]; PosType& dg = dgrad[ip]; Tensor<RealType, 3>& dh = dhess[ip]; dg[0] *= r_jk_inv; dg[1] *= r_Ij_inv; dg[2] *= r_Ik_inv; PosType gr_ee = dg[0] * disp_jk; gradLogPsi(p, jel) -= dg[1] * disp_Ij - gr_ee; lapLogPsi(p, jel) -= (dh(0, 0) + lapfac * dg[0] - ctwo * dh(0, 1) * dot(disp_jk, disp_Ij) * r_jk_inv * r_Ij_inv + dh(1, 1) + lapfac * dg[1]); gradLogPsi(p, kel) -= dg[2] * disp_Ik + gr_ee; lapLogPsi(p, kel) -= (dh(0, 0) + lapfac * dg[0] + ctwo * dh(0, 2) * dot(disp_jk, disp_Ik) * r_jk_inv * r_Ik_inv + dh(2, 2) + lapfac * dg[2]); dLogPsi[p] -= dval; } } } } } for (int k = 0; k < myVars.size(); ++k) { int kk = myVars.where(k); if (kk < 0) continue; dlogpsi[kk] = (ValueType)dLogPsi[k]; RealType sum = 0.0; for (int i = 0; i < Nelec; i++) { #if defined(QMC_COMPLEX) sum -= 0.5 * lapLogPsi(k, i); for (int jdim = 0; jdim < OHMMS_DIM; ++jdim) sum -= P.G[i][jdim].real() * gradLogPsi(k, i)[jdim]; #else sum -= 0.5 * lapLogPsi(k, i) + dot(P.G[i], gradLogPsi(k, i)); #endif } dhpsioverpsi[kk] = (ValueType)sum; } } } inline GradType evalGradSource(ParticleSet& P, ParticleSet& source, int isrc) { ParticleSet::ParticleGradient_t tempG; ParticleSet::ParticleLaplacian_t tempL; tempG.resize(P.getTotalNum()); 
tempL.resize(P.getTotalNum()); QTFull::RealType delta = 0.00001; QTFull::RealType c1 = 1.0 / delta / 2.0; QTFull::RealType c2 = 1.0 / delta / delta; GradType g_return(0.0); // GRAD TEST COMPUTATION PosType rI = source.R[isrc]; for (int iondim = 0; iondim < 3; iondim++) { source.R[isrc][iondim] = rI[iondim] + delta; source.update(); P.update(); LogValueType log_p = evaluateLog(P, tempG, tempL); source.R[isrc][iondim] = rI[iondim] - delta; source.update(); P.update(); LogValueType log_m = evaluateLog(P, tempG, tempL); QTFull::RealType log_p_r(0.0), log_m_r(0.0); log_p_r = log_p.real(); log_m_r = log_m.real(); //symmetric finite difference formula for gradient. g_return[iondim] = c1 * (log_p_r - log_m_r); //reset everything to how it was. source.R[isrc][iondim] = rI[iondim]; } // this last one makes sure the distance tables and internal neighbourlist correspond to unperturbed source. source.update(); P.update(); build_compact_list(P); return g_return; } inline GradType evalGradSource(ParticleSet& P, ParticleSet& source, int isrc, TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad, TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad) { ParticleSet::ParticleGradient_t Gp, Gm, dG; ParticleSet::ParticleLaplacian_t Lp, Lm, dL; Gp.resize(P.getTotalNum()); Gm.resize(P.getTotalNum()); dG.resize(P.getTotalNum()); Lp.resize(P.getTotalNum()); Lm.resize(P.getTotalNum()); dL.resize(P.getTotalNum()); QTFull::RealType delta = 0.00001; QTFull::RealType c1 = 1.0 / delta / 2.0; QTFull::RealType c2 = 1.0 / delta / delta; GradType g_return(0.0); // GRAD TEST COMPUTATION PosType rI = source.R[isrc]; for (int iondim = 0; iondim < 3; iondim++) { Lp = 0; Gp = 0; Lm = 0; Gm = 0; source.R[isrc][iondim] = rI[iondim] + delta; source.update(); P.update(); LogValueType log_p = evaluateLog(P, Gp, Lp); source.R[isrc][iondim] = rI[iondim] - delta; source.update(); P.update(); LogValueType log_m = evaluateLog(P, Gm, Lm); QTFull::RealType log_p_r(0.0), log_m_r(0.0); 
log_p_r = log_p.real(); log_m_r = log_m.real(); dG = Gp - Gm; dL = Lp - Lm; //symmetric finite difference formula for gradient. g_return[iondim] = c1 * (log_p_r - log_m_r); grad_grad[iondim] += c1 * dG; lapl_grad[iondim] += c1 * dL; //reset everything to how it was. source.R[isrc][iondim] = rI[iondim]; } // this last one makes sure the distance tables and internal neighbourlist correspond to unperturbed source. source.update(); P.update(); build_compact_list(P); return g_return; } }; } // namespace qmcplusplus #endif
updatePCG.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ extern "C" void BPUpdatePCG(const dlong & N, const dlong & Nblocks, const dfloat * __restrict__ cpu_invDegree, const dfloat * __restrict__ cpu_p, const dfloat * __restrict__ cpu_Ap, const dfloat & alpha, dfloat * __restrict__ cpu_x, dfloat * __restrict__ cpu_r, dfloat * __restrict__ redr){ dfloat rdotr = 0; const dlong Nelements = N/p_Np; #pragma omp parallel for reduction(+: rdotr) for(dlong e=0;e<Nelements;++e){ for(int i=0;i<p_Np;++i){ const dlong n = e*p_Np+i; cpu_x[n] += alpha*cpu_p[n]; const dfloat rn = cpu_r[n] - alpha*cpu_Ap[n]; rdotr += rn*rn*cpu_invDegree[n]; cpu_r[n] = rn; } } redr[0] = rdotr; } extern "C" void BPMultipleUpdatePCG( const dlong & N, const dlong & offset, const dlong & Nblocks, const dfloat * __restrict__ cpu_invDegree, const dfloat * __restrict__ cpu_p, const dfloat * __restrict__ cpu_Ap, const dfloat alpha, dfloat * __restrict__ cpu_x, dfloat * __restrict__ cpu_r, dfloat * __restrict__ redr){ dfloat rdotr = 0; const dlong Nelements = N/p_Np; for(int fld=0; fld<p_Nfields; fld++){ #pragma omp parallel for reduction(+: rdotr) for(dlong e=0;e<Nelements;++e){ for(int i=0;i<p_Np;++i){ const dlong n = e*p_Np+i + fld*offset; cpu_x[n] += alpha*cpu_p[n]; const dfloat rn = cpu_r[n] - alpha*cpu_Ap[n]; rdotr += rn*rn*cpu_invDegree[e*p_Np+i]; cpu_r[n] = rn; } } } redr[0] = rdotr; }
update_ops_matrix_dense_double.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "constant.h" #include "utility.h" #include "update_ops.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _USE_SIMD #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #endif #ifdef _USE_SIMD void double_qubit_dense_matrix_gate_simd_high(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim); void double_qubit_dense_matrix_gate_simd_middle(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim); void double_qubit_dense_matrix_gate_simd_low(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim); #endif void double_qubit_dense_matrix_gate_c(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE matrix[16], CTYPE *state, ITYPE dim) { #ifdef _OPENMP UINT threshold = 13; UINT default_thread_count = omp_get_max_threads(); if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1); #endif #ifdef _USE_SIMD double_qubit_dense_matrix_gate_simd(target_qubit_index1, target_qubit_index2, matrix, state, dim); #else double_qubit_dense_matrix_gate_nosimd(target_qubit_index1, target_qubit_index2, matrix, state, dim); #endif #ifdef _OPENMP omp_set_num_threads(default_thread_count); #endif } void double_qubit_dense_matrix_gate_nosimd(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE matrix[16], CTYPE *state, ITYPE dim) { const UINT min_qubit_index = get_min_ui(target_qubit_index1, target_qubit_index2); const UINT max_qubit_index = get_max_ui(target_qubit_index1, target_qubit_index2); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); const ITYPE target_mask1 = 1ULL << target_qubit_index1; const ITYPE target_mask2 = 1ULL << 
target_qubit_index2; // loop variables const ITYPE loop_dim = dim / 4; ITYPE state_index; #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // create index ITYPE basis_0 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2); // gather index ITYPE basis_1 = basis_0 + target_mask1; ITYPE basis_2 = basis_0 + target_mask2; ITYPE basis_3 = basis_1 + target_mask2; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; CTYPE cval_2 = state[basis_2]; CTYPE cval_3 = state[basis_3]; // set values state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1 + matrix[2] * cval_2 + matrix[3] * cval_3; state[basis_1] = matrix[4] * cval_0 + matrix[5] * cval_1 + matrix[6] * cval_2 + matrix[7] * cval_3; state[basis_2] = matrix[8] * cval_0 + matrix[9] * cval_1 + matrix[10] * cval_2 + matrix[11] * cval_3; state[basis_3] = matrix[12] * cval_0 + matrix[13] * cval_1 + matrix[14] * cval_2 + matrix[15] * cval_3; } } #ifdef _USE_SIMD void double_qubit_dense_matrix_gate_simd_high(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim) { assert(target_qubit_index1 >= 2); assert(target_qubit_index2 >= 2); const UINT min_qubit_index = get_min_ui(target_qubit_index1, target_qubit_index2); const UINT max_qubit_index = get_max_ui(target_qubit_index1, target_qubit_index2); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); const ITYPE target_mask1_shift = 1ULL << (target_qubit_index1 + 1); const ITYPE target_mask2_shift = 1ULL << (target_qubit_index2 + 1); // loop variables const ITYPE loop_dim = dim / 4; ITYPE state_index; double* ptr_vec = (double*)vec; const double* ptr_mat = (const double*)mat; #ifdef _OPENMP #pragma omp parallel for 
#endif for (state_index = 0; state_index < loop_dim; state_index += 4) { __m256d res_real_sum, res_imag_sum; __m256d vec_before, vec_after; __m256d vec_real00, vec_imag00; __m256d vec_real01, vec_imag01; __m256d vec_real10, vec_imag10; __m256d vec_real11, vec_imag11; __m256d dup_mr, dup_mi; // create index ITYPE basis00 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2); // shited due to index from complex -> double basis00 = basis00 << 1; ITYPE basis01 = basis00 + target_mask1_shift; ITYPE basis10 = basis00 + target_mask2_shift; ITYPE basis11 = basis01 + target_mask2_shift; //// Pick 4 complex values from basis00 vec_before = _mm256_loadu_pd(ptr_vec + basis00); // (i1 r1 i0 r0) vec_after = _mm256_loadu_pd(ptr_vec + basis00 + 4); // (i3 r3 i2 r2) //// Split real values and imag values via shuffle vec_real00 = _mm256_shuffle_pd(vec_before, vec_after, 0); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (r3 r1 r2 r0) 0000 = 0 vec_imag00 = _mm256_shuffle_pd(vec_before, vec_after, 15); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (i3 i1 i2 i0) 1111 = 15 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[0]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[1]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_mul_pd(vec_real00, dup_mr); res_real_sum = _mm256_fnmadd_pd(vec_imag00, dup_mi, res_real_sum); // -a*b+c res_imag_sum = _mm256_mul_pd(vec_real00, dup_mi); res_imag_sum = _mm256_fmadd_pd(vec_imag00, dup_mr, res_imag_sum); // a*b+c //// Pick 4 complex values from basis01 vec_before = _mm256_loadu_pd(ptr_vec + basis01); // (i1 r1 i0 r0) vec_after = _mm256_loadu_pd(ptr_vec + basis01 + 4); // (i3 r3 i2 r2) //// Split real values and imag values via shuffle vec_real01 = _mm256_shuffle_pd(vec_before, vec_after, 0); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (r3 r1 r2 r0) 0000 = 0 vec_imag01 = _mm256_shuffle_pd(vec_before, vec_after, 15); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (i3 i1 i2 i0) 1111 = 15 //// Pick matrix elem with 4 
dup dup_mr = _mm256_set1_pd(ptr_mat[2]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[3]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real01, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag01, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real01, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag01, dup_mr, res_imag_sum); // a*b+c //// Pick 4 complex values from basis10 vec_before = _mm256_loadu_pd(ptr_vec + basis10); // (i1 r1 i0 r0) vec_after = _mm256_loadu_pd(ptr_vec + basis10 + 4); // (i3 r3 i2 r2) //// Split real values and imag values via shuffle vec_real10 = _mm256_shuffle_pd(vec_before, vec_after, 0); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (r3 r1 r2 r0) 0000 = 0 vec_imag10 = _mm256_shuffle_pd(vec_before, vec_after, 15); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (i3 i1 i2 i0) 1111 = 15 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[4]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[5]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real10, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag10, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real10, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag10, dup_mr, res_imag_sum); // a*b+c //// Pick 4 complex values from basis11 vec_before = _mm256_loadu_pd(ptr_vec + basis11); // (i1 r1 i0 r0) vec_after = _mm256_loadu_pd(ptr_vec + basis11 + 4); // (i3 r3 i2 r2) //// Split real values and imag values via shuffle vec_real11 = _mm256_shuffle_pd(vec_before, vec_after, 0); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (r3 r1 r2 r0) 0000 = 0 vec_imag11 = _mm256_shuffle_pd(vec_before, vec_after, 15); // (i1 r1 i0 r0) (i3 r3 i2 r2) -> (i3 i1 i2 i0) 1111 = 15 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[6]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[7]); // (mi0 mi0 mi0 mi0) 
//// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real11, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag11, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real11, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag11, dup_mr, res_imag_sum); // a*b+c //// Store vec_before = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 0); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r1 i1 i0 r0) 0000 = 0 vec_after = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 15); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r3 i3 i2 r2) 1111 = 15 _mm256_storeu_pd(ptr_vec + basis00, vec_before); _mm256_storeu_pd(ptr_vec + basis00 + 4, vec_after); // vector is already fetched, fetch successive matrix elements and perform dot(vec,vec) for other basis //// basis01 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[8]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[9]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_mul_pd(vec_real00, dup_mr); res_real_sum = _mm256_fnmadd_pd(vec_imag00, dup_mi, res_real_sum); // -a*b+c res_imag_sum = _mm256_mul_pd(vec_real00, dup_mi); res_imag_sum = _mm256_fmadd_pd(vec_imag00, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[10]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[11]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real01, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag01, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real01, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag01, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[12]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[13]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real10, dup_mr, res_real_sum); // a*b+c res_real_sum = 
_mm256_fnmadd_pd(vec_imag10, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real10, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag10, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[14]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[15]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real11, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag11, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real11, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag11, dup_mr, res_imag_sum); // a*b+c //// Store vec_before = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 0); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r1 i1 i0 r0) 0000 = 0 vec_after = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 15); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r3 i3 i2 r2) 1111 = 15 _mm256_storeu_pd(ptr_vec + basis01, vec_before); _mm256_storeu_pd(ptr_vec + basis01 + 4, vec_after); //// basis10 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[16]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[17]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_mul_pd(vec_real00, dup_mr); res_real_sum = _mm256_fnmadd_pd(vec_imag00, dup_mi, res_real_sum); // -a*b+c res_imag_sum = _mm256_mul_pd(vec_real00, dup_mi); res_imag_sum = _mm256_fmadd_pd(vec_imag00, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[18]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[19]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real01, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag01, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real01, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag01, dup_mr, res_imag_sum); // a*b+c //// Pick matrix 
elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[20]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[21]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real10, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag10, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real10, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag10, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[22]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[23]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real11, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag11, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real11, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag11, dup_mr, res_imag_sum); // a*b+c //// Store vec_before = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 0); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r1 i1 i0 r0) 0000 = 0 vec_after = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 15); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r3 i3 i2 r2) 1111 = 15 _mm256_storeu_pd(ptr_vec + basis10, vec_before); _mm256_storeu_pd(ptr_vec + basis10 + 4, vec_after); //// basis11 //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[24]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[25]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_mul_pd(vec_real00, dup_mr); res_real_sum = _mm256_fnmadd_pd(vec_imag00, dup_mi, res_real_sum); // -a*b+c res_imag_sum = _mm256_mul_pd(vec_real00, dup_mi); res_imag_sum = _mm256_fmadd_pd(vec_imag00, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[26]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[27]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real01, dup_mr, 
res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag01, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real01, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag01, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[28]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[29]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real10, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag10, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real10, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag10, dup_mr, res_imag_sum); // a*b+c //// Pick matrix elem with 4 dup dup_mr = _mm256_set1_pd(ptr_mat[30]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[31]); // (mi0 mi0 mi0 mi0) //// Compute real and imag part res_real_sum = _mm256_fmadd_pd(vec_real11, dup_mr, res_real_sum); // a*b+c res_real_sum = _mm256_fnmadd_pd(vec_imag11, dup_mi, res_real_sum); //-a*b+c res_imag_sum = _mm256_fmadd_pd(vec_real11, dup_mi, res_imag_sum); // a*b+c res_imag_sum = _mm256_fmadd_pd(vec_imag11, dup_mr, res_imag_sum); // a*b+c //// Store vec_before = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 0); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r1 i1 i0 r0) 0000 = 0 vec_after = _mm256_shuffle_pd(res_real_sum, res_imag_sum, 15); // (r3 r1 r2 r0) (i3 i1 i2 i0) -> (r3 i3 i2 r2) 1111 = 15 _mm256_storeu_pd(ptr_vec + basis11, vec_before); _mm256_storeu_pd(ptr_vec + basis11 + 4, vec_after); } } void double_qubit_dense_matrix_gate_simd_low(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim) { assert(target_qubit_index1 < 2); assert(target_qubit_index2 < 2); assert(dim >= 8); // loop variables const ITYPE loop_dim = dim * 2; ITYPE state_index; double* ptr_vec = (double*)vec; const double* ptr_mat = (const double*)mat; if (target_qubit_index1 < target_qubit_index2) { 
#ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; state_index += 16) { __m256d vec1, vec2, vec3, vec4; __m256d u1, u2, u3, u4, u1f, u2f, u3f, u4f; __m256d mr, mi; vec1 = _mm256_loadu_pd(ptr_vec + state_index); // c1 c0 vec1 = _mm256_permute4x64_pd(vec1, 78); // (c1 c0) -> (c0 c1) : 1032 = 1*2 + 4*3 + 16*0 + 64*1 = 64+12+2=78 vec2 = _mm256_loadu_pd(ptr_vec + state_index + 4); // c3 c2 vec2 = _mm256_permute4x64_pd(vec2, 78); // (c3 c2) -> (c2 c3) : 1032 = 1*2+4*3+16*0+32*1 = 46 vec3 = _mm256_loadu_pd(ptr_vec + state_index + 8); // c5 c4 u1 = _mm256_blend_pd(vec1, vec3, 3); // (c0 c1) (c5 c4) -> (c0 c4) : 0011 = 3 u2 = _mm256_blend_pd(vec1, vec3, 12); // (c0 c1) (c5 c4) -> (c5 c1) : 1100 = 12 u2 = _mm256_permute4x64_pd(u2, 78); // (c5 c1) -> (c1 c5) : 1032 = 1*2+4*3+16*0+64*1 = 64+12+2=78 vec4 = _mm256_loadu_pd(ptr_vec + state_index + 12); // c7 c6 u3 = _mm256_blend_pd(vec2, vec4, 3); // (c2 c3) (c7 c6) -> (c2 c6) : 0011 = 3 u4 = _mm256_blend_pd(vec2, vec4, 12); // (c2 c3) (c7 c6) -> (c7 c3) : 1100 = 12 u4 = _mm256_permute4x64_pd(u4, 78); // (c7 c3) -> (c3 c7) : 1032 = 1*2+4*3+16*0+32*1 = 46 u1f = _mm256_permute4x64_pd(u1, 177); // 2301 = 64*2+16*3+1 = 128+48+1 = 177 u2f = _mm256_permute4x64_pd(u2, 177); u3f = _mm256_permute4x64_pd(u3, 177); u4f = _mm256_permute4x64_pd(u4, 177); // u1 = (c0i c0r c4i c4r) // u2 = (c1i c1r c5i c5r) // u3 = (c2i c2r c6i c6r) // u4 = (c3i c3r c7i c7r) // u1f = (c0r c0i c4r c4i) // u2f = (c1r c1i c5r c5i) // u3f = (c2r c2i c6r c6i) // u4f = (c3r c3i c7r c7i) __m256d res_u1, res_u2, res_u3, res_u4, tmp_inv; tmp_inv = _mm256_set_pd(1, -1, 1, -1); mr = _mm256_set1_pd(ptr_mat[0]); res_u1 = _mm256_mul_pd(mr, u1); // c0i*m0r, -c0r*m0r mi = _mm256_set1_pd(ptr_mat[1]); res_u1 = _mm256_fmaddsub_pd(mi, u1f, res_u1); // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[2]); res_u1 = _mm256_fmaddsub_pd(mr, u2, res_u1); // m1r*c1i + m0i*c0r + c0i*m0r, m1r*c1r - m0i*c0i + c0r*m0r mi = 
_mm256_set1_pd(ptr_mat[3]); res_u1 = _mm256_fmaddsub_pd(mi, u2f, res_u1); // m1i*c1r + m1r*c1i + m0i*c0r + c0i*m0r, m1i*c1i - m1r*c1r + m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[4]); res_u1 = _mm256_fmaddsub_pd(mr, u3, res_u1); mi = _mm256_set1_pd(ptr_mat[5]); res_u1 = _mm256_fmaddsub_pd(mi, u3f, res_u1); mr = _mm256_set1_pd(ptr_mat[6]); res_u1 = _mm256_fmaddsub_pd(mr, u4, res_u1); mi = _mm256_set1_pd(ptr_mat[7]); res_u1 = _mm256_fmaddsub_pd(mi, u4f, res_u1); res_u1 = _mm256_mul_pd(res_u1, tmp_inv); mr = _mm256_set1_pd(ptr_mat[8]); res_u2 = _mm256_mul_pd(mr, u1); // c0i*m0r, -c0r*m0r mi = _mm256_set1_pd(ptr_mat[9]); res_u2 = _mm256_fmaddsub_pd(mi, u1f, res_u2); // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[10]); res_u2 = _mm256_fmaddsub_pd(mr, u2, res_u2); // m1r*c1i + m0i*c0r + c0i*m0r, m1r*c1r - m0i*c0i + c0r*m0r mi = _mm256_set1_pd(ptr_mat[11]); res_u2 = _mm256_fmaddsub_pd(mi, u2f, res_u2); // m1i*c1r + m1r*c1i + m0i*c0r + c0i*m0r, m1i*c1i - m1r*c1r + m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[12]); res_u2 = _mm256_fmaddsub_pd(mr, u3, res_u2); mi = _mm256_set1_pd(ptr_mat[13]); res_u2 = _mm256_fmaddsub_pd(mi, u3f, res_u2); mr = _mm256_set1_pd(ptr_mat[14]); res_u2 = _mm256_fmaddsub_pd(mr, u4, res_u2); mi = _mm256_set1_pd(ptr_mat[15]); res_u2 = _mm256_fmaddsub_pd(mi, u4f, res_u2); res_u2 = _mm256_mul_pd(res_u2, tmp_inv); res_u2 = _mm256_permute4x64_pd(res_u2, 78); // flip vec1 = _mm256_blend_pd(res_u1, res_u2, 3); // blend vec2 = _mm256_blend_pd(res_u1, res_u2, 12); // blend vec1 = _mm256_permute4x64_pd(vec1, 78); // flip _mm256_storeu_pd(ptr_vec + state_index, vec1); _mm256_storeu_pd(ptr_vec + state_index + 8, vec2); mr = _mm256_set1_pd(ptr_mat[16]); res_u3 = _mm256_mul_pd(mr, u1); // c0i*m0r, -c0r*m0r mi = _mm256_set1_pd(ptr_mat[17]); res_u3 = _mm256_fmaddsub_pd(mi, u1f, res_u3); // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[18]); res_u3 = _mm256_fmaddsub_pd(mr, u2, res_u3); // m1r*c1i + m0i*c0r + c0i*m0r, 
m1r*c1r - m0i*c0i + c0r*m0r mi = _mm256_set1_pd(ptr_mat[19]); res_u3 = _mm256_fmaddsub_pd(mi, u2f, res_u3); // m1i*c1r + m1r*c1i + m0i*c0r + c0i*m0r, m1i*c1i - m1r*c1r + m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[20]); res_u3 = _mm256_fmaddsub_pd(mr, u3, res_u3); mi = _mm256_set1_pd(ptr_mat[21]); res_u3 = _mm256_fmaddsub_pd(mi, u3f, res_u3); mr = _mm256_set1_pd(ptr_mat[22]); res_u3 = _mm256_fmaddsub_pd(mr, u4, res_u3); mi = _mm256_set1_pd(ptr_mat[23]); res_u3 = _mm256_fmaddsub_pd(mi, u4f, res_u3); res_u3 = _mm256_mul_pd(res_u3, tmp_inv); mr = _mm256_set1_pd(ptr_mat[24]); res_u4 = _mm256_mul_pd(mr, u1); // c0i*m0r, -c0r*m0r mi = _mm256_set1_pd(ptr_mat[25]); res_u4 = _mm256_fmaddsub_pd(mi, u1f, res_u4); // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[26]); res_u4 = _mm256_fmaddsub_pd(mr, u2, res_u4); // m1r*c1i + m0i*c0r + c0i*m0r, m1r*c1r - m0i*c0i + c0r*m0r mi = _mm256_set1_pd(ptr_mat[27]); res_u4 = _mm256_fmaddsub_pd(mi, u2f, res_u4); // m1i*c1r + m1r*c1i + m0i*c0r + c0i*m0r, m1i*c1i - m1r*c1r + m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[28]); res_u4 = _mm256_fmaddsub_pd(mr, u3, res_u4); mi = _mm256_set1_pd(ptr_mat[29]); res_u4 = _mm256_fmaddsub_pd(mi, u3f, res_u4); mr = _mm256_set1_pd(ptr_mat[30]); res_u4 = _mm256_fmaddsub_pd(mr, u4, res_u4); mi = _mm256_set1_pd(ptr_mat[31]); res_u4 = _mm256_fmaddsub_pd(mi, u4f, res_u4); res_u4 = _mm256_mul_pd(res_u4, tmp_inv); res_u4 = _mm256_permute4x64_pd(res_u4, 78); // flip vec3 = _mm256_blend_pd(res_u3, res_u4, 3); // blend vec4 = _mm256_blend_pd(res_u3, res_u4, 12); // blend vec3 = _mm256_permute4x64_pd(vec3, 78); // flip _mm256_storeu_pd(ptr_vec + state_index + 4, vec3); _mm256_storeu_pd(ptr_vec + state_index + 12, vec4); } } else { #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; state_index += 16) { __m256d vec1, vec2, vec3, vec4; __m256d u1, u2, u3, u4, u1f, u2f, u3f, u4f; __m256d mr, mi; vec1 = _mm256_loadu_pd(ptr_vec + state_index); // c1 c0 vec1 = 
_mm256_permute4x64_pd(vec1, 78); // (c1 c0) -> (c0 c1) : 1032 = 1*2 + 4*3 + 16*0 + 64*1 = 64+12+2=78 vec2 = _mm256_loadu_pd(ptr_vec + state_index + 4); // c3 c2 vec2 = _mm256_permute4x64_pd(vec2, 78); // (c3 c2) -> (c2 c3) : 1032 = 1*2+4*3+16*0+32*1 = 46 vec3 = _mm256_loadu_pd(ptr_vec + state_index + 8); // c5 c4 u1 = _mm256_blend_pd(vec1, vec3, 3); // (c0 c1) (c5 c4) -> (c0 c4) : 0011 = 3 u2 = _mm256_blend_pd(vec1, vec3, 12); // (c0 c1) (c5 c4) -> (c5 c1) : 1100 = 12 u2 = _mm256_permute4x64_pd(u2, 78); // (c5 c1) -> (c1 c5) : 1032 = 1*2+4*3+16*0+64*1 = 64+12+2=78 vec4 = _mm256_loadu_pd(ptr_vec + state_index + 12); // c7 c6 u3 = _mm256_blend_pd(vec2, vec4, 3); // (c2 c3) (c7 c6) -> (c2 c6) : 0011 = 3 u4 = _mm256_blend_pd(vec2, vec4, 12); // (c2 c3) (c7 c6) -> (c7 c3) : 1100 = 12 u4 = _mm256_permute4x64_pd(u4, 78); // (c7 c3) -> (c3 c7) : 1032 = 1*2+4*3+16*0+32*1 = 46 u1f = _mm256_permute4x64_pd(u1, 177); // 2301 = 64*2+16*3+1 = 128+48+1 = 177 u2f = _mm256_permute4x64_pd(u2, 177); u3f = _mm256_permute4x64_pd(u3, 177); u4f = _mm256_permute4x64_pd(u4, 177); // u1 = (c0i c0r c4i c4r) // u2 = (c1i c1r c5i c5r) // u3 = (c2i c2r c6i c6r) // u4 = (c3i c3r c7i c7r) // u1f = (c0r c0i c4r c4i) // u2f = (c1r c1i c5r c5i) // u3f = (c2r c2i c6r c6i) // u4f = (c3r c3i c7r c7i) __m256d res_u1, res_u2, res_u3, res_u4, tmp_inv; tmp_inv = _mm256_set_pd(1, -1, 1, -1); mr = _mm256_set1_pd(ptr_mat[0]); res_u1 = _mm256_mul_pd(mr, u1); // c0i*m0r, -c0r*m0r mi = _mm256_set1_pd(ptr_mat[1]); res_u1 = _mm256_fmaddsub_pd(mi, u1f, res_u1); // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[2]); res_u1 = _mm256_fmaddsub_pd(mr, u3, res_u1); // m1r*c1i + m0i*c0r + c0i*m0r, m1r*c1r - m0i*c0i + c0r*m0r mi = _mm256_set1_pd(ptr_mat[3]); res_u1 = _mm256_fmaddsub_pd(mi, u3f, res_u1); // m1i*c1r + m1r*c1i + m0i*c0r + c0i*m0r, m1i*c1i - m1r*c1r + m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[4]); res_u1 = _mm256_fmaddsub_pd(mr, u2, res_u1); mi = _mm256_set1_pd(ptr_mat[5]); res_u1 = 
_mm256_fmaddsub_pd(mi, u2f, res_u1); mr = _mm256_set1_pd(ptr_mat[6]); res_u1 = _mm256_fmaddsub_pd(mr, u4, res_u1); mi = _mm256_set1_pd(ptr_mat[7]); res_u1 = _mm256_fmaddsub_pd(mi, u4f, res_u1); res_u1 = _mm256_mul_pd(res_u1, tmp_inv); mr = _mm256_set1_pd(ptr_mat[16]); res_u3 = _mm256_mul_pd(mr, u1); // c0i*m0r, -c0r*m0r mi = _mm256_set1_pd(ptr_mat[17]); res_u3 = _mm256_fmaddsub_pd(mi, u1f, res_u3); // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[18]); res_u3 = _mm256_fmaddsub_pd(mr, u3, res_u3); // m1r*c1i + m0i*c0r + c0i*m0r, m1r*c1r - m0i*c0i + c0r*m0r mi = _mm256_set1_pd(ptr_mat[19]); res_u3 = _mm256_fmaddsub_pd(mi, u3f, res_u3); // m1i*c1r + m1r*c1i + m0i*c0r + c0i*m0r, m1i*c1i - m1r*c1r + m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[20]); res_u3 = _mm256_fmaddsub_pd(mr, u2, res_u3); mi = _mm256_set1_pd(ptr_mat[21]); res_u3 = _mm256_fmaddsub_pd(mi, u2f, res_u3); mr = _mm256_set1_pd(ptr_mat[22]); res_u3 = _mm256_fmaddsub_pd(mr, u4, res_u3); mi = _mm256_set1_pd(ptr_mat[23]); res_u3 = _mm256_fmaddsub_pd(mi, u4f, res_u3); res_u3 = _mm256_mul_pd(res_u3, tmp_inv); res_u3 = _mm256_permute4x64_pd(res_u3, 78); // flip vec1 = _mm256_blend_pd(res_u1, res_u3, 3); // blend vec3 = _mm256_blend_pd(res_u1, res_u3, 12); // blend vec1 = _mm256_permute4x64_pd(vec1, 78); // flip _mm256_storeu_pd(ptr_vec + state_index, vec1); _mm256_storeu_pd(ptr_vec + state_index + 8, vec3); mr = _mm256_set1_pd(ptr_mat[8]); res_u2 = _mm256_mul_pd(mr, u1); // c0i*m0r, -c0r*m0r mi = _mm256_set1_pd(ptr_mat[9]); res_u2 = _mm256_fmaddsub_pd(mi, u1f, res_u2); // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[10]); res_u2 = _mm256_fmaddsub_pd(mr, u3, res_u2); // m1r*c1i + m0i*c0r + c0i*m0r, m1r*c1r - m0i*c0i + c0r*m0r mi = _mm256_set1_pd(ptr_mat[11]); res_u2 = _mm256_fmaddsub_pd(mi, u3f, res_u2); // m1i*c1r + m1r*c1i + m0i*c0r + c0i*m0r, m1i*c1i - m1r*c1r + m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[12]); res_u2 = _mm256_fmaddsub_pd(mr, u2, res_u2); mi = 
_mm256_set1_pd(ptr_mat[13]); res_u2 = _mm256_fmaddsub_pd(mi, u2f, res_u2); mr = _mm256_set1_pd(ptr_mat[14]); res_u2 = _mm256_fmaddsub_pd(mr, u4, res_u2); mi = _mm256_set1_pd(ptr_mat[15]); res_u2 = _mm256_fmaddsub_pd(mi, u4f, res_u2); res_u2 = _mm256_mul_pd(res_u2, tmp_inv); mr = _mm256_set1_pd(ptr_mat[24]); res_u4 = _mm256_mul_pd(mr, u1); // c0i*m0r, -c0r*m0r mi = _mm256_set1_pd(ptr_mat[25]); res_u4 = _mm256_fmaddsub_pd(mi, u1f, res_u4); // m0i*c0r + c0i*m0r, m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[26]); res_u4 = _mm256_fmaddsub_pd(mr, u3, res_u4); // m1r*c1i + m0i*c0r + c0i*m0r, m1r*c1r - m0i*c0i + c0r*m0r mi = _mm256_set1_pd(ptr_mat[27]); res_u4 = _mm256_fmaddsub_pd(mi, u3f, res_u4); // m1i*c1r + m1r*c1i + m0i*c0r + c0i*m0r, m1i*c1i - m1r*c1r + m0i*c0i - c0r*m0r mr = _mm256_set1_pd(ptr_mat[28]); res_u4 = _mm256_fmaddsub_pd(mr, u2, res_u4); mi = _mm256_set1_pd(ptr_mat[29]); res_u4 = _mm256_fmaddsub_pd(mi, u2f, res_u4); mr = _mm256_set1_pd(ptr_mat[30]); res_u4 = _mm256_fmaddsub_pd(mr, u4, res_u4); mi = _mm256_set1_pd(ptr_mat[31]); res_u4 = _mm256_fmaddsub_pd(mi, u4f, res_u4); res_u4 = _mm256_mul_pd(res_u4, tmp_inv); res_u4 = _mm256_permute4x64_pd(res_u4, 78); // flip vec2 = _mm256_blend_pd(res_u2, res_u4, 3); // blend vec4 = _mm256_blend_pd(res_u2, res_u4, 12); // blend vec2 = _mm256_permute4x64_pd(vec2, 78); // flip _mm256_storeu_pd(ptr_vec + state_index + 4, vec2); _mm256_storeu_pd(ptr_vec + state_index + 12, vec4); } } } __inline void _element_swap(CTYPE* vec, UINT i1, UINT i2) { CTYPE temp = vec[i1]; vec[i1] = vec[i2]; vec[i2] = temp; } void double_qubit_dense_matrix_gate_simd_middle(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE _mat[16], CTYPE* vec, ITYPE dim) { CTYPE mat[16]; memcpy(mat, _mat, sizeof(CTYPE) * 16); if (target_qubit_index2 < target_qubit_index1) { UINT temp = target_qubit_index1; target_qubit_index1 = target_qubit_index2; target_qubit_index2 = temp; _element_swap(mat, 1, 2); _element_swap(mat, 4, 8); _element_swap(mat, 
7, 11); _element_swap(mat, 13, 14); _element_swap(mat, 5, 10); _element_swap(mat, 6, 9); } assert(target_qubit_index1 < 2); assert(target_qubit_index2 >= 2); const UINT min_qubit_index = get_min_ui(target_qubit_index1, target_qubit_index2); const UINT max_qubit_index = get_max_ui(target_qubit_index1, target_qubit_index2); const ITYPE min_qubit_mask = 1ULL << min_qubit_index; const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1); const ITYPE low_mask = min_qubit_mask - 1; const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask; const ITYPE high_mask = ~(max_qubit_mask - 1); const ITYPE target_mask1_shift = 1ULL << (target_qubit_index1 + 1); const ITYPE target_mask2_shift = 1ULL << (target_qubit_index2 + 1); // loop variables const ITYPE loop_dim = dim / 4; ITYPE state_index; double* ptr_vec = (double*)vec; const double* ptr_mat = (const double*)mat; #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; state_index+=2) { // create index ITYPE basis00 = (state_index&low_mask) + ((state_index&mid_mask) << 1) + ((state_index&high_mask) << 2); // shited due to index from complex -> double basis00 = basis00 << 1; //ITYPE basis01 = basis00 + target_mask1_shift; ITYPE basis10 = basis00 + target_mask2_shift; //ITYPE basis11 = basis01 + target_mask2_shift; //// Pick 4 complex values from basis00 __m256d vec_bef0, vec_aft0, vec_bef1, vec_aft1; vec_bef0 = _mm256_loadu_pd(ptr_vec + basis00); // (i1 r1 i0 r0) vec_aft0 = _mm256_loadu_pd(ptr_vec + basis00 + 4); // (i3 r3 i2 r2) vec_bef1 = _mm256_loadu_pd(ptr_vec + basis10); vec_aft1 = _mm256_loadu_pd(ptr_vec + basis10 + 4); __m256d vec_u0, vec_u1, vec_u2, vec_u3; __m256d vec_u0f, vec_u1f, vec_u2f, vec_u3f; __m256d vec_inv; vec_inv = _mm256_set_pd(1, -1, 1, -1); if (target_qubit_index1 == 0) { vec_aft0 = _mm256_permute4x64_pd(vec_aft0, 78); // (3 2 1 0) -> (1 0 3 2) 1*2 + 4*3 + 16*0 + 64*1 = 64+12+2 = 78 vec_aft1 = _mm256_permute4x64_pd(vec_aft1, 78); vec_u0 = _mm256_blend_pd(vec_bef0, 
vec_aft0, 12); // (a a b b) = 1*0 + 2*0 + 4*1 + 8*1 = 12 vec_u1 = _mm256_blend_pd(vec_bef0, vec_aft0, 3); // (a a b b) = 1*0 + 2*0 + 4*1 + 8*1 = 12 vec_u2 = _mm256_blend_pd(vec_bef1, vec_aft1, 12); // (a a b b) = 1*0 + 2*0 + 4*1 + 8*1 = 12 vec_u3 = _mm256_blend_pd(vec_bef1, vec_aft1, 3); // (a a b b) = 1*0 + 2*0 + 4*1 + 8*1 = 12 vec_u1 = _mm256_permute4x64_pd(vec_u1, 78); // (3 2 1 0) -> (1 0 3 2) 1*2 + 4*3 + 16*0 + 64*1 = 64+12+2 = 78 vec_u3 = _mm256_permute4x64_pd(vec_u3, 78); } else { vec_u0 = vec_bef0; vec_u1 = vec_aft0; vec_u2 = vec_bef1; vec_u3 = vec_aft1; } vec_u0f = _mm256_permute_pd(vec_u0, 5); // 1*1 + 2*0 + 4*1 + 8*0 vec_u1f = _mm256_permute_pd(vec_u1, 5); // 1*1 + 2*0 + 4*1 + 8*0 vec_u2f = _mm256_permute_pd(vec_u2, 5); // 1*1 + 2*0 + 4*1 + 8*0 vec_u3f = _mm256_permute_pd(vec_u3, 5); // 1*1 + 2*0 + 4*1 + 8*0 vec_u0f = _mm256_mul_pd(vec_u0f, vec_inv); vec_u1f = _mm256_mul_pd(vec_u1f, vec_inv); vec_u2f = _mm256_mul_pd(vec_u2f, vec_inv); vec_u3f = _mm256_mul_pd(vec_u3f, vec_inv); __m256d dup_mr, dup_mi; __m256d res_sum0, res_sum1, res_sum2, res_sum3; dup_mr = _mm256_set1_pd(ptr_mat[0]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[1]); // (mi0 mi0 mi0 mi0) res_sum0 = _mm256_mul_pd(vec_u0, dup_mr); res_sum0 = _mm256_fmadd_pd(vec_u0f, dup_mi, res_sum0); dup_mr = _mm256_set1_pd(ptr_mat[2]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[3]); // (mi1 mi1 mi1 mi1) res_sum0 = _mm256_fmadd_pd(vec_u1, dup_mr, res_sum0); res_sum0 = _mm256_fmadd_pd(vec_u1f, dup_mi, res_sum0); dup_mr = _mm256_set1_pd(ptr_mat[4]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[5]); // (mi1 mi1 mi1 mi1) res_sum0 = _mm256_fmadd_pd(vec_u2, dup_mr, res_sum0); res_sum0 = _mm256_fmadd_pd(vec_u2f, dup_mi, res_sum0); dup_mr = _mm256_set1_pd(ptr_mat[6]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[7]); // (mi1 mi1 mi1 mi1) res_sum0 = _mm256_fmadd_pd(vec_u3, dup_mr, res_sum0); res_sum0 = _mm256_fmadd_pd(vec_u3f, dup_mi, res_sum0); dup_mr = _mm256_set1_pd(ptr_mat[8]); 
// (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[9]); // (mi0 mi0 mi0 mi0) res_sum1 = _mm256_mul_pd(vec_u0, dup_mr); res_sum1 = _mm256_fmadd_pd(vec_u0f, dup_mi, res_sum1); dup_mr = _mm256_set1_pd(ptr_mat[10]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[11]); // (mi1 mi1 mi1 mi1) res_sum1 = _mm256_fmadd_pd(vec_u1, dup_mr, res_sum1); res_sum1 = _mm256_fmadd_pd(vec_u1f, dup_mi, res_sum1); dup_mr = _mm256_set1_pd(ptr_mat[12]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[13]); // (mi1 mi1 mi1 mi1) res_sum1 = _mm256_fmadd_pd(vec_u2, dup_mr, res_sum1); res_sum1 = _mm256_fmadd_pd(vec_u2f, dup_mi, res_sum1); dup_mr = _mm256_set1_pd(ptr_mat[14]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[15]); // (mi1 mi1 mi1 mi1) res_sum1 = _mm256_fmadd_pd(vec_u3, dup_mr, res_sum1); res_sum1 = _mm256_fmadd_pd(vec_u3f, dup_mi, res_sum1); dup_mr = _mm256_set1_pd(ptr_mat[16]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[17]); // (mi0 mi0 mi0 mi0) res_sum2 = _mm256_mul_pd(vec_u0, dup_mr); res_sum2 = _mm256_fmadd_pd(vec_u0f, dup_mi, res_sum2); dup_mr = _mm256_set1_pd(ptr_mat[18]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[19]); // (mi1 mi1 mi1 mi1) res_sum2 = _mm256_fmadd_pd(vec_u1, dup_mr, res_sum2); res_sum2 = _mm256_fmadd_pd(vec_u1f, dup_mi, res_sum2); dup_mr = _mm256_set1_pd(ptr_mat[20]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[21]); // (mi1 mi1 mi1 mi1) res_sum2 = _mm256_fmadd_pd(vec_u2, dup_mr, res_sum2); res_sum2 = _mm256_fmadd_pd(vec_u2f, dup_mi, res_sum2); dup_mr = _mm256_set1_pd(ptr_mat[22]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[23]); // (mi1 mi1 mi1 mi1) res_sum2 = _mm256_fmadd_pd(vec_u3, dup_mr, res_sum2); res_sum2 = _mm256_fmadd_pd(vec_u3f, dup_mi, res_sum2); dup_mr = _mm256_set1_pd(ptr_mat[24]); // (mr0 mr0 mr0 mr0) dup_mi = _mm256_set1_pd(ptr_mat[25]); // (mi0 mi0 mi0 mi0) res_sum3 = _mm256_mul_pd(vec_u0, dup_mr); res_sum3 = _mm256_fmadd_pd(vec_u0f, dup_mi, res_sum3); dup_mr = 
_mm256_set1_pd(ptr_mat[26]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[27]); // (mi1 mi1 mi1 mi1) res_sum3 = _mm256_fmadd_pd(vec_u1, dup_mr, res_sum3); res_sum3 = _mm256_fmadd_pd(vec_u1f, dup_mi, res_sum3); dup_mr = _mm256_set1_pd(ptr_mat[28]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[29]); // (mi1 mi1 mi1 mi1) res_sum3 = _mm256_fmadd_pd(vec_u2, dup_mr, res_sum3); res_sum3 = _mm256_fmadd_pd(vec_u2f, dup_mi, res_sum3); dup_mr = _mm256_set1_pd(ptr_mat[30]); // (mr1 mr1 mr1 mr1) dup_mi = _mm256_set1_pd(ptr_mat[31]); // (mi1 mi1 mi1 mi1) res_sum3 = _mm256_fmadd_pd(vec_u3, dup_mr, res_sum3); res_sum3 = _mm256_fmadd_pd(vec_u3f, dup_mi, res_sum3); if (target_qubit_index1 == 0) { res_sum1 = _mm256_permute4x64_pd(res_sum1, 78); // (3 2 1 0) -> (1 0 3 2) 1*2 + 4*3 + 16*0 + 64*1 = 64+12+2 = 78 res_sum3 = _mm256_permute4x64_pd(res_sum3, 78); vec_bef0 = _mm256_blend_pd(res_sum0, res_sum1, 12); // (a a b b) = 1*0 + 2*0 + 4*1 + 8*1 = 12 vec_aft0 = _mm256_blend_pd(res_sum0, res_sum1, 3); // (a a b b) = 1*0 + 2*0 + 4*1 + 8*1 = 12 vec_bef1 = _mm256_blend_pd(res_sum2, res_sum3, 12); // (a a b b) = 1*0 + 2*0 + 4*1 + 8*1 = 12 vec_aft1 = _mm256_blend_pd(res_sum2, res_sum3, 3); // (a a b b) = 1*0 + 2*0 + 4*1 + 8*1 = 12 vec_aft0 = _mm256_permute4x64_pd(vec_aft0, 78); // (3 2 1 0) -> (1 0 3 2) 1*2 + 4*3 + 16*0 + 64*1 = 64+12+2 = 78 vec_aft1 = _mm256_permute4x64_pd(vec_aft1, 78); } else { vec_bef0 = res_sum0; vec_aft0 = res_sum1; vec_bef1 = res_sum2; vec_aft1 = res_sum3; } //// Store _mm256_storeu_pd(ptr_vec + basis00, vec_bef0); // (i1 r1 i0 r0) _mm256_storeu_pd(ptr_vec + basis00 + 4, vec_aft0); // (i3 r3 i2 r2) _mm256_storeu_pd(ptr_vec + basis10, vec_bef1); _mm256_storeu_pd(ptr_vec + basis10 + 4, vec_aft1); } } void double_qubit_dense_matrix_gate_simd(UINT target_qubit_index1, UINT target_qubit_index2, const CTYPE mat[16], CTYPE* vec, ITYPE dim) { assert(target_qubit_index1 != target_qubit_index2); if (dim == 4) { // avx2 code cannot use for 2-qubit state 
double_qubit_dense_matrix_gate_nosimd(target_qubit_index1, target_qubit_index2, mat, vec, dim); } else if (target_qubit_index1 >= 2 && target_qubit_index2 >= 2) { double_qubit_dense_matrix_gate_simd_high(target_qubit_index1, target_qubit_index2, mat, vec, dim); } else if (target_qubit_index1 >= 2 || target_qubit_index2 >= 2) { double_qubit_dense_matrix_gate_simd_middle(target_qubit_index1, target_qubit_index2, mat, vec, dim); } else { double_qubit_dense_matrix_gate_simd_low(target_qubit_index1, target_qubit_index2, mat, vec, dim); } } #endif
lib.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <float.h> #include <omp.h> double calculate_euclidean_distance(double *atom_i, double *atom_j) { return sqrt(pow(atom_i[0] - atom_j[0], 2) + pow(atom_i[1] - atom_j[1], 2) + pow(atom_i[2] - atom_j[2], 2)); } double calculate_dynamic_energy(double euclidean_distance, double sigma) { double number; if (euclidean_distance != 0) { number = sigma / euclidean_distance; } else { number = DBL_MAX; } return pow(number, 12) - pow(number, 6); } double lennard_jones_function(double *atoms_position, int n, double epsilon, double sigma) { double total_energy = 0.0; int i, j; for (i = 0; i < n - 1; ++i) { for (j = i + 1; j < n; ++j) { double euclidean_distance = calculate_euclidean_distance(atoms_position + i*3, atoms_position + j*3); double dynamic_energy = calculate_dynamic_energy(euclidean_distance, sigma); total_energy += dynamic_energy; } } total_energy *= 4 * epsilon; return total_energy; } void evaluate(double *population, double *values, int population_size, int number_of_atoms){ int i; #pragma omp parallel for for(i = 0; i < population_size; ++i) { values[i] = lennard_jones_function(&population[i * number_of_atoms * 3], number_of_atoms, 1, 1); } }
ordered-4.c
/* Compile-time test (presumably a compiler testsuite case — verify against
   the GCC gomp testsuite): "#pragma omp ordered simd" must be accepted
   inside a simd loop even when that loop is nested in region types that
   would reject a plain "ordered" construct. No code is executed. */

/* ordered simd nested inside critical, ordered threads, task, taskloop. */
void f1 (void)
{
  int i, j;
  #pragma omp critical
  {
    /* inside a critical region */
    #pragma omp simd
    for (i = 0; i < 64; i++)
      {
        #pragma omp ordered simd
        ;
      }
  }
  #pragma omp ordered threads
  {
    /* inside an ordered (threads) region */
    #pragma omp simd
    for (i = 0; i < 64; i++)
      {
        #pragma omp ordered simd
        ;
      }
  }
  #pragma omp task
  {
    /* inside an explicit task */
    #pragma omp simd
    for (i = 0; i < 64; i++)
      {
        #pragma omp ordered simd
        ;
      }
  }
  /* inside a taskloop body (no braces: the simd loop is the loop body) */
  #pragma omp taskloop
  for (j = 0; j < 64; j++)
    #pragma omp simd
    for (i = 0; i < 64; i++)
      {
        #pragma omp ordered simd
        ;
      }
}

/* Orphaned "ordered simd" at function scope. */
void f2 (void)
{
  #pragma omp ordered simd
  ;
}

/* "ordered" with both threads and simd clauses (comma-separated). */
void f3 (void)
{
  #pragma omp ordered threads , simd
  ;
}
GB_binop__times_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__times_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__times_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__times_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fp64) // A*D function (colscale): GB (_AxD__times_fp64) // D*A function (rowscale): GB (_DxB__times_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__times_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__times_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fp64) // C=scalar+B GB (_bind1st__times_fp64) // C=scalar+B' GB (_bind1st_tran__times_fp64) // C=A+scalar GB (_bind2nd__times_fp64) // C=A'+scalar GB (_bind2nd_tran__times_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of 
C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_FP64 || GxB_NO_TIMES_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__times_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix 
A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
broadcast_binary_operation.h
/* Copyright 2021 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef __NUMPY_BROADCAST_BINARY_OPERATION_H__
#define __NUMPY_BROADCAST_BINARY_OPERATION_H__

#include "point_task.h"

namespace legate {
namespace numpy {

#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
// CUDA kernel: out[i] = func(in[i], scalar) for each flattened index i.
// One thread per element; "dense" selects raw-pointer access, otherwise
// the flat index is unflattened through the pitches into an N-D point.
template <int DIM, typename BinaryFunction, typename Args>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
  gpu_broadcast_binary_op(const Args args, const bool dense)
{
  const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= args.volume) return;  // grid may overshoot the volume
  BinaryFunction func;
  if (dense) {
    args.outptr[idx] = func(args.inptr[idx], args.scalar);
  } else {
    const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo);
    args.out[point] = func(args.in[point], args.scalar);
  }
}
#endif

// Base class for all Legate's binary operation tasks
// Applies a binary functor element-wise between an input region and a
// scalar (broadcast), writing into an output region:
//   out_region = in_region1 op scalar
template <class Derived, class BinaryFunction>
class BroadcastBinaryOperationTask : public PointTask<Derived> {
 private:
  using first_argument_type  = typename BinaryFunction::first_argument_type;
  using second_argument_type = typename BinaryFunction::second_argument_type;
  // Result type deduced from invoking the functor on its two argument types.
  using result_type = std::result_of_t<BinaryFunction(first_argument_type, second_argument_type)>;

 public:
  static_assert(std::is_same<first_argument_type, second_argument_type>::value,
                "BroadcastBinaryOperation currently requires first_argument_type and "
                "second_argument_type to be the same type.");
  // Task ID encodes op code, variant offset, and the three value types.
  static const int TASK_ID = task_id<BinaryFunction::op_code,
                                     NUMPY_BROADCAST_VARIANT_OFFSET,
                                     result_type,
                                     first_argument_type,
                                     second_argument_type>;
  // out_region = in_region1 op scalar
  static const int REGIONS = 2;

  // Arguments unpacked from the Legion task: the iteration rectangle,
  // output/input accessors, flattening pitches, element count, the
  // broadcast scalar, and (when dense) raw pointers into the regions.
  template <int N>
  struct DeserializedArgs {
    Legion::Rect<N> rect;
    AccessorWO<result_type, N> out;
    AccessorRO<first_argument_type, N> in;
    Pitches<N - 1> pitches;
    size_t volume;
    second_argument_type scalar;
    result_type* outptr;
    const first_argument_type* inptr;
    // Unpacks all fields; returns true iff both regions are dense
    // row-major so the raw-pointer fast path may be used. NOTE(review):
    // outptr/inptr are only assigned on the dense path — callers must
    // not touch them when this returns false.
    bool deserialize(LegateDeserializer& derez,
                     const Legion::Task* task,
                     const std::vector<Legion::PhysicalRegion>& regions)
    {
      rect = NumPyProjectionFunctor::unpack_shape<N>(task, derez);
      out  = derez.unpack_accessor_WO<result_type, N>(regions[0], rect);
      in   = derez.unpack_accessor_RO<first_argument_type, N>(regions[1], rect);
      // The scalar operand arrives as the task's first future.
      scalar = task->futures[0].get_result<second_argument_type>(true /*silence warnings*/);
      volume = pitches.flatten(rect);
#ifndef LEGION_BOUNDS_CHECKS
      // Check to see if this is dense or not
      return out.accessor.is_dense_row_major(rect) && in.accessor.is_dense_row_major(rect) &&
             (outptr = out.ptr(rect)) && (inptr = in.ptr(rect));
#else
      // No dense execution if we're doing bounds checks
      return false;
#endif
    }
  };

  // Sequential CPU variant: raw-pointer loop when dense, otherwise an
  // accessor-based N-D loop via CPULoop.
  template <int DIM>
  static void dispatch_cpu(const Legion::Task* task,
                           const std::vector<Legion::PhysicalRegion>& regions,
                           LegateDeserializer& derez)
  {
    DeserializedArgs<DIM> args;
    const bool dense = args.deserialize(derez, task, regions);
    if (args.volume == 0) return;  // empty rectangle: nothing to do
    BinaryFunction func;
    if (dense) {
      for (size_t idx = 0; idx < args.volume; ++idx)
        args.outptr[idx] = func(args.inptr[idx], args.scalar);
    } else {
      const Scalar<second_argument_type, DIM> scalar(args.scalar);
      CPULoop<DIM>::binary_loop(func, args.out, args.in, scalar, args.rect);
    }
  }

#ifdef LEGATE_USE_OPENMP
  // OpenMP variant: same structure as dispatch_cpu with the dense loop
  // parallelized; the sparse path delegates to OMPLoop.
  template <int DIM>
  static void dispatch_omp(const Legion::Task* task,
                           const std::vector<Legion::PhysicalRegion>& regions,
                           LegateDeserializer& derez)
  {
    DeserializedArgs<DIM> args;
    const bool dense = args.deserialize(derez, task, regions);
    if (args.volume == 0) return;
    BinaryFunction func;
    if (dense) {
#pragma omp parallel for schedule(static)
      for (size_t idx = 0; idx < args.volume; ++idx)
        args.outptr[idx] = func(args.inptr[idx], args.scalar);
    } else {
      const Scalar<second_argument_type, DIM> scalar(args.scalar);
      OMPLoop<DIM>::binary_loop(func, args.out, args.in, scalar, args.rect);
    }
  }
#endif

#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
  // GPU variant: launches one thread per element, rounded up to whole
  // blocks of THREADS_PER_BLOCK.
  template <int DIM>
  static void dispatch_gpu(const Legion::Task* task,
                           const std::vector<Legion::PhysicalRegion>& regions,
                           LegateDeserializer& derez)
  {
    DeserializedArgs<DIM> args;
    const bool dense = args.deserialize(derez, task, regions);
    if (args.volume == 0) return;
    const size_t blocks = (args.volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    gpu_broadcast_binary_op<DIM, BinaryFunction, DeserializedArgs<DIM>>
      <<<blocks, THREADS_PER_BLOCK>>>(args, dense);
  }
#elif defined(LEGATE_USE_CUDA)
  // Declaration only for non-nvcc translation units; the definition is
  // compiled by nvcc (the __CUDACC__ branch above).
  template <int DIM>
  static void dispatch_gpu(const Legion::Task* task,
                           const std::vector<Legion::PhysicalRegion>& regions,
                           LegateDeserializer& derez);
#endif
};

}  // namespace numpy
}  // namespace legate

#endif  // __NUMPY_BROADCAST_BINARY_OPERATION_H__
displacement_op_opencl.h
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#ifndef DISPLACEMENT_OP_OPENCL_H_
#define DISPLACEMENT_OP_OPENCL_H_

#include <vector>

#ifdef USE_OPENCL
#define __CL_ENABLE_EXCEPTIONS
#ifdef __APPLE__
#include <OpenCL/cl.hpp>
#else
#include <CL/cl.hpp>
#endif
#endif

#include "bound_space_op.h"
#include "gpu/gpu_helper.h"
#include "grid.h"
#include "resource_manager.h"
#include "shape.h"
#include "type_util.h"

namespace bdm {

/// Defines the 3D physical interactions between physical objects
template <typename TSimulation = Simulation<>>
class DisplacementOpOpenCL {
 public:
  DisplacementOpOpenCL() {}
  ~DisplacementOpOpenCL() {}

  // Computes one displacement step for all cells on the OpenCL device:
  // gathers cell/grid data into host vectors, wraps them in CL buffers,
  // runs the "collide" kernel, reads the per-cell movement vectors back,
  // and applies them on the host. Enabled only for SOA sphere containers;
  // the whole body is compiled out unless USE_OPENCL is defined.
  template <typename TContainer>
  typename std::enable_if<is_soa_sphere<TContainer>::value>::type operator()(
      TContainer* cells, uint16_t type_idx) const {
#ifdef USE_OPENCL
    auto* sim = TSimulation::GetActive();
    auto* grid = sim->GetGrid();
    auto* rm = sim->GetResourceManager();
    auto* param = sim->GetParam();
    auto context = rm->GetOpenCLContext();
    auto queue = rm->GetOpenCLCommandQueue();
    auto programs = rm->GetOpenCLProgramList();

    std::vector<cl_double> mass(cells->size());
    // Kernel output: one (x, y, z) displacement per cell.
    std::vector<std::array<cl_double, 3>> cell_movements(cells->size());
    std::vector<cl_uint> gpu_starts;
    std::vector<cl_ushort> gpu_lengths;
    std::vector<cl_uint> successors(cells->size());
    cl_uint box_length;
    std::array<cl_uint, 3> num_boxes_axis;
    std::array<cl_int, 3> grid_dimensions;
    // Neighbor-search radius: largest object size, squared.
    cl_double squared_radius =
        grid->GetLargestObjectSize() * grid->GetLargestObjectSize();

    // We need to create a mass vector, because it is not stored by default in
    // a cell container
    cells->FillMassVector(&mass);
    grid->GetSuccessors(&successors);
    grid->GetBoxInfo(&gpu_starts, &gpu_lengths);
    grid->GetGridInfo(&box_length, &num_boxes_axis, &grid_dimensions);

    // Allocate GPU buffers
    cl::Buffer positions_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                             cells->size() * 3 * sizeof(cl_double),
                             cells->GetPositionPtr());
    cl::Buffer diameters_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                             cells->size() * sizeof(cl_double),
                             cells->GetDiameterPtr());
    cl::Buffer tractor_force_arg(
        *context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
        cells->size() * 3 * sizeof(cl_double), cells->GetTractorForcePtr());
    cl::Buffer adherence_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                             cells->size() * sizeof(cl_double),
                             cells->GetAdherencePtr());
    cl::Buffer box_id_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                          cells->size() * sizeof(cl_uint),
                          cells->GetBoxIdPtr());
    cl::Buffer mass_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                        cells->size() * sizeof(cl_double), mass.data());
    cl::Buffer cell_movements_arg(
        *context, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR,
        cells->size() * 3 * sizeof(cl_double), cell_movements.data()->data());
    cl::Buffer starts_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                          gpu_starts.size() * sizeof(cl_uint),
                          gpu_starts.data());
    // NOTE(review): gpu_lengths holds cl_ushort but the byte count is taken
    // with sizeof(cl_short); the sizes are identical, yet sizeof(cl_ushort)
    // would state the intent -- confirm.
    cl::Buffer lengths_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                           gpu_lengths.size() * sizeof(cl_short),
                           gpu_lengths.data());
    cl::Buffer successors_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                              successors.size() * sizeof(cl_uint),
                              successors.data());
    cl::Buffer nba_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                       3 * sizeof(cl_uint), num_boxes_axis.data());
    cl::Buffer gd_arg(*context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR,
                      3 * sizeof(cl_int), grid_dimensions.data());

    // Create the kernel object from our program
    // TODO(ahmad): generalize the program selection, in case we have more than
    // one. We can maintain an unordered map of programs maybe
    cl::Kernel collide((*programs)[0], "collide");

    // Set kernel parameters (argument indices must match the kernel signature)
    collide.setArg(0, positions_arg);
    collide.setArg(1, diameters_arg);
    collide.setArg(2, tractor_force_arg);
    collide.setArg(3, adherence_arg);
    collide.setArg(4, box_id_arg);
    collide.setArg(5, mass_arg);
    collide.setArg(6, param->simulation_time_step_);
    collide.setArg(7, param->simulation_max_displacement_);
    collide.setArg(8, squared_radius);
    collide.setArg(9, static_cast<cl_int>(cells->size()));
    collide.setArg(10, starts_arg);
    collide.setArg(11, lengths_arg);
    collide.setArg(12, successors_arg);
    collide.setArg(13, box_length);
    collide.setArg(14, nba_arg);
    collide.setArg(15, gd_arg);
    collide.setArg(16, cell_movements_arg);

    // The amount of threads for each work group (analogous to CUDA thread
    // block)
    int block_size = 256;
    auto num_objects = cells->size();

    try {
      // The global size determines the total number of threads that will be
      // spawned on the GPU, in groups of local_size
      cl::NDRange global_size =
          cl::NDRange(num_objects + (block_size - (num_objects % block_size)));
      cl::NDRange local_size = cl::NDRange(block_size);
      queue->enqueueNDRangeKernel(collide, cl::NullRange, global_size,
                                  local_size);
    } catch (const cl::Error& err) {
      Log::Error("DisplacementOpOpenCL", err.what(), "(", err.err(),
                 ") = ", GetErrorString(err.err()));
      throw;
    }

    try {
      // Blocking read (CL_TRUE): waits for the kernel to finish and copies
      // the computed displacements back into cell_movements.
      queue->enqueueReadBuffer(cell_movements_arg, CL_TRUE, 0,
                               cells->size() * 3 * sizeof(cl_double),
                               cell_movements.data()->data());
    } catch (const cl::Error& err) {
      Log::Error("DisplacementOpOpenCL", err.what(), "(", err.err(),
                 ") = ", GetErrorString(err.err()));
      throw;
    }

    // set new positions after all updates have been calculated
    // otherwise some cells would see neighbors with already updated positions
    // which would lead to inconsistencies
#pragma omp parallel for
    for (size_t i = 0; i < cells->size(); i++) {
      auto&& cell = (*cells)[i];
      cell.UpdatePosition(cell_movements[i]);
      if (param->bound_space_) {
        ApplyBoundingBox(&cell, param->min_bound_, param->max_bound_);
      }
      // NOTE(review): looks like a no-op; presumably forces a write-back of
      // the (possibly clamped) position into the SOA container -- confirm.
      cell.SetPosition(cell.GetPosition());

      // Reset biological movement to 0.
      cell.SetTractorForce({0, 0, 0});
    }
#endif  // USE_OPENCL
  }

  // Fallback overload: rejects non-SOA / non-spherical containers at runtime.
  template <typename TContainer>
  typename std::enable_if<!is_soa_sphere<TContainer>::value>::type operator()(
      TContainer* cells, uint16_t type_idx) {
    Fatal("DisplacementOpCuda",
          "You tried to compile GPU-specific function calls for a non-SOA data "
          "structure or non-spherical simulation object.");
  }
};

}  // namespace bdm

#endif  // DISPLACEMENT_OP_OPENCL_H_
par_relax.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Relaxation scheme
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "Common.h"
#include "_hypre_lapack.h"
#include "par_relax.h"

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax
 *
 * Driver that dispatches one relaxation sweep on A u = f to the
 * implementation selected by relax_type (see table below).  relax_points
 * restricts the sweep to C points (>0) or F points (<0); 0 relaxes all.
 * Returns 0 on success; the direct-solve variants (19, 98) may return a
 * nonzero relax_error.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A,
                      hypre_ParVector    *f,
                      HYPRE_Int          *cf_marker,
                      HYPRE_Int           relax_type,
                      HYPRE_Int           relax_points,
                      HYPRE_Real          relax_weight,
                      HYPRE_Real          omega,
                      HYPRE_Real         *l1_norms,
                      hypre_ParVector    *u,
                      hypre_ParVector    *Vtemp,
                      hypre_ParVector    *Ztemp )
{
   HYPRE_Int relax_error = 0;

   /*---------------------------------------------------------------------------------------
    * Switch statement to direct control based on relax_type:
    *    relax_type = 0  -> Jacobi or CF-Jacobi
    *    relax_type = 1  -> Gauss-Seidel <--- very slow, sequential
    *    relax_type = 2  -> Gauss_Seidel: interior points in parallel, boundary sequential
    *    relax_type = 3  -> hybrid: SOR-J mix off-processor, SOR on-processor
    *                       with outer relaxation parameters (forward solve)
    *    relax_type = 4  -> hybrid: SOR-J mix off-processor, SOR on-processor
    *                       with outer relaxation parameters (backward solve)
    *    relax_type = 5  -> hybrid: GS-J mix off-processor, chaotic GS on-node
    *    relax_type = 6  -> hybrid: SSOR-J mix off-processor, SSOR on-processor
    *                       with outer relaxation parameters
    *    relax_type = 7  -> Jacobi (uses Matvec), only needed in CGNR
    *                       [GPU-supported, CF supported with redundant computation]
    *    relax_type = 8  -> hybrid L1 Symm. Gauss-Seidel
    *    relax_type = 9  -> Direct solve, Gaussian elimination
    *    relax_type = 10 -> On-processor direct forward solve for matrices with
    *                       triangular structure (indices need not be ordered triangular)
    *    relax_type = 11 -> Two Stage approximation to GS. Uses the strict lower
    *                       part of the diagonal matrix
    *    relax_type = 12 -> Two Stage approximation to GS. Uses the strict lower
    *                       part of the diagonal matrix and a second iteration
    *                       for additional error approximation
    *    relax_type = 13 -> hybrid L1 Gauss-Seidel forward solve
    *    relax_type = 14 -> hybrid L1 Gauss-Seidel backward solve
    *    relax_type = 15 -> CG
    *    relax_type = 16 -> Scaled Chebyshev
    *    relax_type = 17 -> FCF-Jacobi
    *    relax_type = 18 -> L1-Jacobi [GPU-supported through call to relax7Jacobi]
    *    relax_type = 19 -> Direct Solve, (old version)
    *    relax_type = 20 -> Kaczmarz
    *    relax_type = 29 -> Direct solve: use gaussian elimination & BLAS
    *                       (with pivoting) (old version)
    *    relax_type = 98 -> Direct solve, Gaussian elimination
    *    relax_type = 99 -> Direct solve, Gaussian elimination
    *    relax_type = 199-> Direct solve, Gaussian elimination
    *-------------------------------------------------------------------------------------*/

   switch (relax_type)
   {
      case 0: /* Weighted Jacobi */
         hypre_BoomerAMGRelax0WeightedJacobi(A, f, cf_marker, relax_points, relax_weight,
                                             u, Vtemp);
         break;

      case 1: /* Gauss-Seidel VERY SLOW */
         hypre_BoomerAMGRelax1GaussSeidel(A, f, cf_marker, relax_points, u);
         break;

      case 2: /* Gauss-Seidel: relax interior points in parallel, boundary sequentially */
         hypre_BoomerAMGRelax2GaussSeidel(A, f, cf_marker, relax_points, u);
         break;

      /* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */
      case 3:
         hypre_BoomerAMGRelax3HybridGaussSeidel(A, f, cf_marker, relax_points, relax_weight,
                                                omega, u, Vtemp, Ztemp);
         break;

      case 4: /* Hybrid: Jacobi off-processor, Gauss-Seidel/SOR on-processor (backward loop) */
         hypre_BoomerAMGRelax4HybridGaussSeidel(A, f, cf_marker, relax_points, relax_weight,
                                                omega, u, Vtemp, Ztemp);
         break;

      case 5: /* Hybrid: Jacobi off-processor, chaotic Gauss-Seidel on-processor */
         hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel(A, f, cf_marker, relax_points, u);
         break;

      case 6: /* Hybrid: Jacobi off-processor, Symm. Gauss-Seidel/SSOR on-processor with outer relaxation parameter */
         hypre_BoomerAMGRelax6HybridSSOR(A, f, cf_marker, relax_points, relax_weight, omega,
                                         u, Vtemp, Ztemp);
         break;

      case 7: /* Jacobi (uses ParMatvec) */
         hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms,
                                     u, Vtemp);
         break;

      case 8: /* hybrid L1 Symm. Gauss-Seidel */
         hypre_BoomerAMGRelax8HybridL1SSOR(A, f, cf_marker, relax_points, relax_weight, omega,
                                           l1_norms, u, Vtemp, Ztemp);
         break;

      /* Hybrid: Jacobi off-processor, ordered Gauss-Seidel on-processor */
      case 10:
         hypre_BoomerAMGRelax10TopoOrderedGaussSeidel(A, f, cf_marker, relax_points,
                                                      relax_weight, omega, u, Vtemp, Ztemp);
         break;

      case 11: /* Two Stage Gauss Seidel. Forward sweep only */
         hypre_BoomerAMGRelax11TwoStageGaussSeidel(A, f, cf_marker, relax_points, relax_weight,
                                                   omega, u, Vtemp, Ztemp);
         break;

      case 12: /* Two Stage Gauss Seidel. Uses the diagonal matrix for the GS part */
         hypre_BoomerAMGRelax12TwoStageGaussSeidel(A, f, cf_marker, relax_points, relax_weight,
                                                   omega, u, Vtemp, Ztemp);
         break;

      case 13: /* hybrid L1 Gauss-Seidel forward solve */
         hypre_BoomerAMGRelax13HybridL1GaussSeidel(A, f, cf_marker, relax_points, relax_weight,
                                                   omega, l1_norms, u, Vtemp, Ztemp);
         break;

      case 14: /* hybrid L1 Gauss-Seidel backward solve */
         hypre_BoomerAMGRelax14HybridL1GaussSeidel(A, f, cf_marker, relax_points, relax_weight,
                                                   omega, l1_norms, u, Vtemp, Ztemp);
         break;

      case 18: /* weighted L1 Jacobi */
         hypre_BoomerAMGRelax18WeightedL1Jacobi(A, f, cf_marker, relax_points, relax_weight,
                                                l1_norms, u, Vtemp);
         break;

      case 19: /* Direct solve: use gaussian elimination */
         relax_error = hypre_BoomerAMGRelax19GaussElim(A, f, u);
         break;

      case 20: /* Kaczmarz */
         hypre_BoomerAMGRelaxKaczmarz(A, f, omega, l1_norms, u);
         break;

      case 98: /* Direct solve: use gaussian elimination & BLAS (with pivoting) */
         relax_error = hypre_BoomerAMGRelax98GaussElimPivot(A, f, u);
         break;
   }

   return relax_error;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelaxWeightedJacobi_core
 *
 * Weighted Jacobi sweep shared by relax types 0 and 18.  The row scaling is
 * l1_norms[i] when l1_norms is given, otherwise the stored diagonal entry.
 * Skip_diag==1 skips the diagonal in the residual sum and blends with
 * (1 - relax_weight) * u; Skip_diag==0 includes the diagonal (L1 variant).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGRelaxWeightedJacobi_core( hypre_ParCSRMatrix *A,
                                         hypre_ParVector    *f,
                                         HYPRE_Int          *cf_marker,
                                         HYPRE_Int           relax_points,
                                         HYPRE_Real          relax_weight,
                                         HYPRE_Real         *l1_norms,
                                         hypre_ParVector    *u,
                                         hypre_ParVector    *Vtemp,
                                         HYPRE_Int           Skip_diag )
{
   MPI_Comm             comm          = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag        = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i      = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j      = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd        = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i      = hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int           *A_offd_j      = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg      = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int            num_rows      = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector        *u_local       = hypre_ParVectorLocalVector(u);
   HYPRE_Complex
*u_data = hypre_VectorData(u_local);
   hypre_Vector  *f_local          = hypre_ParVectorLocalVector(f);
   HYPRE_Complex *f_data           = hypre_VectorData(f_local);
   hypre_Vector  *Vtemp_local      = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Complex *Vtemp_data       = hypre_VectorData(Vtemp_local);
   HYPRE_Complex *v_ext_data       = NULL;   /* received off-processor u values */
   HYPRE_Complex *v_buf_data       = NULL;   /* send buffer of local boundary u values */
   HYPRE_Complex  zero             = 0.0;
   HYPRE_Real     one_minus_weight = 1.0 - relax_weight;
   HYPRE_Complex  res;
   HYPRE_Int      num_procs, my_id, i, j, ii, jj, index, num_sends, start;
   hypre_ParCSRCommHandle *comm_handle;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (num_procs > 1)
   {
      /* start the halo exchange of u so off-processor values are available
       * for the off-diagonal part of the sweep */
      num_sends  = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                 HYPRE_MEMORY_HOST);
      v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         {
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }

      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      Vtemp_data[i] = u_data[i];
   }

   if (num_procs > 1)
   {
      /* complete the halo exchange before reading v_ext_data below */
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_rows; i++)
   {
      /* row scaling: l1 norm when provided, otherwise the diagonal entry */
      const HYPRE_Complex di = l1_norms ? l1_norms[i] : A_diag_data[A_diag_i[i]];

      /*-----------------------------------------------------------
       * If i is of the right type ( C or F or All ) and diagonal is
       * nonzero, relax point i; otherwise, skip it.
       * Relax only C or F points as determined by relax_points.
       *-----------------------------------------------------------*/
      if ( (relax_points == 0 || cf_marker[i] == relax_points) && di != zero )
      {
         res = f_data[i];
         for (jj = A_diag_i[i] + Skip_diag; jj < A_diag_i[i + 1]; jj++)
         {
            ii = A_diag_j[jj];
            res -= A_diag_data[jj] * Vtemp_data[ii];
         }
         for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
         {
            ii = A_offd_j[jj];
            res -= A_offd_data[jj] * v_ext_data[ii];
         }

         if (Skip_diag)
         {
            /* diagonal excluded from res: damped update blends old value */
            u_data[i] *= one_minus_weight;
            u_data[i] += relax_weight * res / di;
         }
         else
         {
            /* diagonal included in res: plain weighted correction */
            u_data[i] += relax_weight * res / di;
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/* Weighted Jacobi (relax_type 0): diagonal-skipping variant of the core */
HYPRE_Int
hypre_BoomerAMGRelax0WeightedJacobi( hypre_ParCSRMatrix *A,
                                     hypre_ParVector    *f,
                                     HYPRE_Int          *cf_marker,
                                     HYPRE_Int           relax_points,
                                     HYPRE_Real          relax_weight,
                                     hypre_ParVector    *u,
                                     hypre_ParVector    *Vtemp )
{
   return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points, relax_weight,
                                                  NULL, u, Vtemp, 1);
}

/* Weighted L1-Jacobi (relax_type 18): scales by l1_norms; delegates to
 * Relax7 on GPUs and in the non-CF case */
HYPRE_Int
hypre_BoomerAMGRelax18WeightedL1Jacobi( hypre_ParCSRMatrix *A,
                                        hypre_ParVector    *f,
                                        HYPRE_Int          *cf_marker,
                                        HYPRE_Int           relax_points,
                                        HYPRE_Real          relax_weight,
                                        HYPRE_Real         *l1_norms,
                                        hypre_ParVector    *u,
                                        hypre_ParVector    *Vtemp )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;

   if (exec == HYPRE_EXEC_DEVICE)
   {
      // XXX GPU calls Relax7 XXX
      return hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points, relax_weight,
                                         l1_norms, u, Vtemp);
   }
   else
#endif
   {
      /* in the case of non-CF, use relax-7 which is faster */
      if (relax_points == 0)
      {
         return hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points, relax_weight,
                                            l1_norms, u, Vtemp);
      }
      else
      {
         return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points,
                                                        relax_weight, l1_norms, u, Vtemp, 0);
      }
   }
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax1GaussSeidel
 *
 * Fully sequential Gauss-Seidel (relax_type 1): processors take turns in
 * rank order; each receives all off-processor boundary values before its
 * local sweep.  Correct but very slow -- kept for reference/debugging.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGRelax1GaussSeidel( hypre_ParCSRMatrix *A,
                                  hypre_ParVector    *f,
                                  HYPRE_Int          *cf_marker,
                                  HYPRE_Int           relax_points,
                                  hypre_ParVector    *u )
{
   MPI_Comm             comm          = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag        = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i      = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j      = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd        = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i      = hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int           *A_offd_j      = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg      = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int            num_rows      = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector        *u_local       = hypre_ParVectorLocalVector(u);
   HYPRE_Complex       *u_data        = hypre_VectorData(u_local);
   hypre_Vector        *f_local       = hypre_ParVectorLocalVector(f);
   HYPRE_Complex       *f_data        = hypre_VectorData(f_local);
   HYPRE_Complex       *v_ext_data    = NULL;
   HYPRE_Complex       *v_buf_data    = NULL;
   HYPRE_Complex        zero          = 0.0;
   HYPRE_Complex        res;
   HYPRE_Int            num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs,
                        vec_start, vec_len;
   hypre_MPI_Status    *status;
   hypre_MPI_Request   *requests;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (num_procs > 1)
   {
      num_sends  = hypre_ParCSRCommPkgNumSends(comm_pkg);
      num_recvs  = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                 HYPRE_MEMORY_HOST);
      v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      status     = hypre_CTAlloc(hypre_MPI_Status, num_recvs + num_sends, HYPRE_MEMORY_HOST);
      requests
= hypre_CTAlloc(hypre_MPI_Request, num_recvs + num_sends, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/

   /* processors take turns in rank order: everyone else sends its boundary
    * values to p; p receives them and performs its full local sweep */
   for (p = 0; p < num_procs; p++)
   {
      jr = 0;
      if (p != my_id)
      {
         /* post sends of our boundary values destined for processor p */
         for (i = 0; i < num_sends; i++)
         {
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            if (ip == p)
            {
               vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               vec_len   = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1) - vec_start;
               for (j = vec_start; j < vec_start + vec_len; j++)
               {
                  v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
               }
               hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm,
                               &requests[jr++]);
            }
         }
         hypre_MPI_Waitall(jr, requests, status);
         hypre_MPI_Barrier(comm);
      }
      else
      {
         if (num_procs > 1)
         {
            /* our turn: receive all off-processor boundary values */
            for (i = 0; i < num_recvs; i++)
            {
               ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
               vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
               vec_len   = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i + 1) - vec_start;
               hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm,
                               &requests[jr++]);
            }
            hypre_MPI_Waitall(jr, requests, status);
         }
         for (i = 0; i < num_rows; i++)
         {
            /*-----------------------------------------------------------
             * If i is of the right type ( C or F ) and diagonal is
             * nonzero, relax point i; otherwise, skip it.
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------*/
            if ( (relax_points == 0 || cf_marker[i] == relax_points) &&
                 A_diag_data[A_diag_i[i]] != zero )
            {
               res = f_data[i];
               for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
               {
                  ii = A_diag_j[jj];
                  res -= A_diag_data[jj] * u_data[ii];
               }
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * v_ext_data[ii];
               }
               u_data[i] = res / A_diag_data[A_diag_i[i]];
            }
         }
         if (num_procs > 1)
         {
            hypre_MPI_Barrier(comm);
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelax2GaussSeidel
 *
 * Gauss-Seidel (relax_type 2): rows with no off-processor couplings
 * (interior points) are relaxed up front; rows with off-processor
 * couplings (boundary points) are then relaxed sequentially, processor
 * by processor, as in Relax1.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGRelax2GaussSeidel( hypre_ParCSRMatrix *A,
                                  hypre_ParVector    *f,
                                  HYPRE_Int          *cf_marker,
                                  HYPRE_Int           relax_points,
                                  hypre_ParVector    *u )
{
   MPI_Comm             comm          = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag        = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i      = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j      = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd        = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i      = hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int           *A_offd_j      = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg      = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int            num_rows      = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector        *u_local       = hypre_ParVectorLocalVector(u);
   HYPRE_Complex       *u_data        = hypre_VectorData(u_local);
   hypre_Vector        *f_local       = hypre_ParVectorLocalVector(f);
   HYPRE_Complex       *f_data        = hypre_VectorData(f_local);
   HYPRE_Complex       *v_ext_data    = NULL;
   HYPRE_Complex       *v_buf_data    = NULL;
   HYPRE_Complex        zero          = 0.0;
   HYPRE_Complex        res;
   HYPRE_Int            num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs,
                        vec_start, vec_len;
   hypre_MPI_Status    *status;
   hypre_MPI_Request   *requests;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (num_procs > 1)
   {
      num_sends  = hypre_ParCSRCommPkgNumSends(comm_pkg);
      num_recvs  = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                 HYPRE_MEMORY_HOST);
      v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
      status     = hypre_CTAlloc(hypre_MPI_Status, num_recvs + num_sends, HYPRE_MEMORY_HOST);
      requests   = hypre_CTAlloc(hypre_MPI_Request, num_recvs + num_sends, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Relax interior points first
    *-----------------------------------------------------------------*/
   for (i = 0; i < num_rows; i++)
   {
      /*-----------------------------------------------------------
       * If i is of the right type ( C or F or All ) and diagonal is
       * nonzero, relax point i; otherwise, skip it.
       *-----------------------------------------------------------*/
      if ( (relax_points == 0 || cf_marker[i] == relax_points) &&
           A_offd_i[i + 1] - A_offd_i[i] == zero &&
           A_diag_data[A_diag_i[i]] != zero )
      {
         res = f_data[i];
         for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
         {
            ii = A_diag_j[jj];
            res -= A_diag_data[jj] * u_data[ii];
         }
         u_data[i] = res / A_diag_data[A_diag_i[i]];
      }
   }

   /* boundary points: sequential, processor by processor (as in Relax1) */
   for (p = 0; p < num_procs; p++)
   {
      jr = 0;
      if (p != my_id)
      {
         for (i = 0; i < num_sends; i++)
         {
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            if (ip == p)
            {
               vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               vec_len   = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1) - vec_start;
               for (j = vec_start; j < vec_start + vec_len; j++)
               {
                  v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
               }
               hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm,
                               &requests[jr++]);
            }
         }
         hypre_MPI_Waitall(jr, requests, status);
         hypre_MPI_Barrier(comm);
      }
      else
      {
         if (num_procs > 1)
         {
            for (i = 0; i < num_recvs; i++)
            {
               ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
               vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
               vec_len   = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i + 1) - vec_start;
               hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm,
                               &requests[jr++]);
            }
            hypre_MPI_Waitall(jr, requests, status);
         }
         for (i = 0; i < num_rows; i++)
         {
            /*-----------------------------------------------------------
             * If i is of the right type ( C or F or All) and diagonal is
             * nonzero, relax point i; otherwise, skip it.
             * Relax only C or F points as determined by relax_points.
             *-----------------------------------------------------------*/
            if ( (relax_points == 0 || cf_marker[i] == relax_points) &&
                 A_offd_i[i + 1] - A_offd_i[i] != zero &&
                 A_diag_data[A_diag_i[i]] != zero)
            {
               res = f_data[i];
               for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++)
               {
                  ii = A_diag_j[jj];
                  res -= A_diag_data[jj] * u_data[ii];
               }
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * v_ext_data[ii];
               }
               u_data[i] = res / A_diag_data[A_diag_i[i]];
            }
         }
         if (num_procs > 1)
         {
            hypre_MPI_Barrier(comm);
         }
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGRelaxHybridGaussSeidel_core
 *
 * Shared engine for the hybrid Gauss-Seidel relax types (3, 4, 6, 8, 10,
 * 13, 14): Jacobi across processor boundaries, Gauss-Seidel/SOR within a
 * processor (and within each thread's row partition).  GS_order selects
 * forward/backward, Symm adds a reverse sweep, Skip_diag chooses the
 * diagonal handling, forced_seq disables threading, and Topo_order uses a
 * topological row ordering (AIR).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGRelaxHybridGaussSeidel_core( hypre_ParCSRMatrix *A,
                                            hypre_ParVector    *f,
                                            HYPRE_Int          *cf_marker,
                                            HYPRE_Int           relax_points,
                                            HYPRE_Real          relax_weight,
                                            HYPRE_Real          omega,
                                            HYPRE_Real         *l1_norms,
                                            hypre_ParVector    *u,
                                            hypre_ParVector    *Vtemp,
                                            hypre_ParVector    *Ztemp,
                                            HYPRE_Int           GS_order,
                                            HYPRE_Int           Symm,
                                            HYPRE_Int           Skip_diag,
                                            HYPRE_Int           forced_seq,
                                            HYPRE_Int           Topo_order )
{
   MPI_Comm             comm          = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix     *A_diag        = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real          *A_diag_data   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int           *A_diag_i      = hypre_CSRMatrixI(A_diag);
   HYPRE_Int           *A_diag_j      = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix     *A_offd        = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int           *A_offd_i      =
hypre_CSRMatrixI(A_offd);
   HYPRE_Real          *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int           *A_offd_j      = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg      = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int            num_rows      = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector        *u_local       = hypre_ParVectorLocalVector(u);
   HYPRE_Complex       *u_data        = hypre_VectorData(u_local);
   hypre_Vector        *f_local       = hypre_ParVectorLocalVector(f);
   HYPRE_Complex       *f_data        = hypre_VectorData(f_local);
   hypre_Vector        *Vtemp_local   = Vtemp ? hypre_ParVectorLocalVector(Vtemp) : NULL;
   HYPRE_Complex       *Vtemp_data    = Vtemp_local ? hypre_VectorData(Vtemp_local) : NULL;
   /*
   hypre_Vector *Ztemp_local = NULL;
   HYPRE_Complex *Ztemp_data = NULL;
   */
   HYPRE_Complex       *v_ext_data    = NULL;   /* received off-processor u values */
   HYPRE_Complex       *v_buf_data    = NULL;   /* send buffer of boundary u values */
   HYPRE_Int           *proc_ordering = NULL;   /* topological row order (Topo_order only) */
   const HYPRE_Real     one_minus_omega = 1.0 - omega;
   HYPRE_Int            num_procs, my_id, num_threads, j, num_sends;
   hypre_ParCSRCommHandle *comm_handle;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   num_threads = forced_seq ? 1 : hypre_NumThreads();

   /* GS order: forward or backward */
   const HYPRE_Int gs_order = GS_order > 0 ? 1 : -1;
   /* for symmetric GS, a forward followed by a backward */
   const HYPRE_Int num_sweeps = Symm ? 2 : 1;
   /* if relax_weight and omega are both 1.0 */
   const HYPRE_Int non_scale = relax_weight == 1.0 && omega == 1.0;
   /* */
   const HYPRE_Real prod = 1.0 - relax_weight * omega;

   /*
   if (num_threads > 1)
   {
      Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
      Ztemp_data = hypre_VectorData(Ztemp_local);
   }
   */

#if defined(HYPRE_USING_PERSISTENT_COMM)
   // JSP: persistent comm can be similarly used for other smoothers
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if (num_procs > 1)
   {
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

      if (!comm_pkg)
      {
         /* build the communication package on first use */
         hypre_MatvecCommPkgCreate(A);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      }

      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

#if defined(HYPRE_USING_PERSISTENT_COMM)
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
      v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
      v_ext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
#else
      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                 HYPRE_MEMORY_HOST);
      v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
#endif

      /* pack boundary u values into the send buffer */
      HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
      HYPRE_Int end   = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (j = begin; j < end; j++)
      {
         v_buf_data[j - begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
      hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

#if defined(HYPRE_USING_PERSISTENT_COMM)
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data);
#else
      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data);
#endif

#if defined(HYPRE_USING_PERSISTENT_COMM)
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, v_ext_data);
#else
      hypre_ParCSRCommHandleDestroy(comm_handle);
#endif
      comm_handle = NULL;

#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
   }

   if (Topo_order)
   {
      /* Check for ordering of matrix. If stored, get pointer, otherwise
       * compute ordering and point matrix variable to array.
       * Used in AIR */
      if (!hypre_ParCSRMatrixProcOrdering(A))
      {
         proc_ordering = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
         hypre_topo_sort(A_diag_i, A_diag_j, A_diag_data, proc_ordering, num_rows);
         hypre_ParCSRMatrixProcOrdering(A) = proc_ordering;
      }
      else
      {
         proc_ordering = hypre_ParCSRMatrixProcOrdering(A);
      }
   }

   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime();
#endif

   /* snapshot u into Vtemp when the sweep needs the pre-sweep values */
   if ( (num_threads > 1 || !non_scale) && Vtemp_data )
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_rows; j++)
      {
         Vtemp_data[j] = u_data[j];
      }
   }

   if (num_threads > 1)
   {
      /* each thread sweeps its own contiguous row partition [ns, ne) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_threads; j++)
      {
         HYPRE_Int ns, ne, sweep;
         hypre_partition1D(num_rows, num_threads, j, &ns, &ne);

         for (sweep = 0; sweep < num_sweeps; sweep++)
         {
            /* symmetric GS: forward on sweep 0, backward on sweep 1 */
            const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1;
            const HYPRE_Int ibegin = iorder > 0 ? ns : ne - 1;
            const HYPRE_Int iend   = iorder > 0 ? ne : ns - 1;

            if (non_scale)
            {
               hypre_HybridGaussSeidelNSThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i,
                                                A_offd_j, A_offd_data, f_data, cf_marker,
                                                relax_points, l1_norms, u_data, Vtemp_data,
                                                v_ext_data, ns, ne, ibegin, iend, iorder,
                                                Skip_diag);
            }
            else
            {
               hypre_HybridGaussSeidelThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i,
                                              A_offd_j, A_offd_data, f_data, cf_marker,
                                              relax_points, relax_weight, omega,
                                              one_minus_omega, prod, l1_norms, u_data,
                                              Vtemp_data, v_ext_data, ns, ne, ibegin, iend,
                                              iorder, Skip_diag);
            }
         } /* for (sweep = 0; sweep < num_sweeps; sweep++) */
      } /* for (j = 0; j < num_threads; j++) */
   }
   else /* if (num_threads > 1) */
   {
      HYPRE_Int sweep;
      for (sweep = 0; sweep < num_sweeps; sweep++)
      {
         const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1;
         const HYPRE_Int ibegin = iorder > 0 ? 0 : num_rows - 1;
         const HYPRE_Int iend   = iorder > 0 ? num_rows : -1;

         if (Topo_order)
         {
            hypre_HybridGaussSeidelOrderedNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i,
                                             A_offd_j, A_offd_data, f_data, cf_marker,
                                             relax_points, u_data, NULL, v_ext_data,
                                             ibegin, iend, iorder, proc_ordering);
         }
         else
         {
            if (non_scale)
            {
               hypre_HybridGaussSeidelNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j,
                                         A_offd_data, f_data, cf_marker, relax_points,
                                         l1_norms, u_data, Vtemp_data, v_ext_data,
                                         ibegin, iend, iorder, Skip_diag);
            }
            else
            {
               hypre_HybridGaussSeidel(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j,
                                       A_offd_data, f_data, cf_marker, relax_points,
                                       relax_weight, omega, one_minus_omega, prod, l1_norms,
                                       u_data, Vtemp_data, v_ext_data,
                                       ibegin, iend, iorder, Skip_diag);
            }
         }
      } /* for (sweep = 0; sweep < num_sweeps; sweep++) */
   } /* if (num_threads > 1) */

#ifndef HYPRE_USING_PERSISTENT_COMM
   if (num_procs > 1)
   {
      hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/* forward hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax3HybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */, 1 /* skip diag */, 0, 0); } } /* backward hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax4HybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, 
relax_weight, omega, NULL, u, Vtemp, Ztemp, -1 /* backward */, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, -1 /* backward */, 0 /* nosymm */, 1 /* skip diag */, 0, 0); } } /* chaotic forward G-S */ HYPRE_Int hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, index, num_sends, start; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F or All) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + 1; jj < A_diag_i[i + 1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /* symmetric hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax6HybridSSOR( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax 
on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1, 1 /* symm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1, 1 /* symm */, 1 /* skip diag */, 0, 0); } } HYPRE_Int hypre_BoomerAMGRelax7Jacobi( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp ) { HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_Vector l1_norms_vec; hypre_ParVector l1_norms_parvec; hypre_VectorData(&l1_norms_vec) = l1_norms; hypre_VectorSize(&l1_norms_vec) = num_rows; /* TODO XXX * The next line is NOT 100% correct, which should be the memory location of l1_norms instead of f * But how do I know it? As said, don't use raw pointers, don't use raw pointers! * It is fine normally since A, f, and l1_norms should live in the same memory space */ hypre_VectorMemoryLocation(&l1_norms_vec) = hypre_ParVectorMemoryLocation(f); hypre_ParVectorLocalVector(&l1_norms_parvec) = &l1_norms_vec; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif /*----------------------------------------------------------------- * Copy f into temporary vector. 
*-----------------------------------------------------------------*/ hypre_ParVectorCopy(f, Vtemp); /*----------------------------------------------------------------- * Perform Matvec Vtemp = w * (f - Au) *-----------------------------------------------------------------*/ hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, Vtemp); /*----------------------------------------------------------------- * u += D^{-1} * Vtemp, where D_ii = ||A(i,:)||_1 *-----------------------------------------------------------------*/ if (relax_points) { hypre_ParVectorElmdivpyMarked(Vtemp, &l1_norms_parvec, u, cf_marker, relax_points); } else { hypre_ParVectorElmdivpy(Vtemp, &l1_norms_parvec, u); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif return hypre_error_flag; } /* symmetric l1 hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax8HybridL1SSOR( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 
0 : 1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, 1, 1 /* symm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, 1, 1 /* symm */, skip_diag, 0, 0); } } /* forward hybrid topology ordered G-S */ HYPRE_Int hypre_BoomerAMGRelax10TopoOrderedGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */, 1 /* skip_diag */, 1, 1); } /* forward l1 hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax13HybridL1GaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 
0 : 1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, 1, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */, skip_diag, 0, 0 ); } } /* backward l1 hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax14HybridL1GaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 
0 : 1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, -1, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, -1 /* backward */, 0 /* nonsymm */, skip_diag, 0, 0 ); } } HYPRE_Int hypre_BoomerAMGRelax19GaussElim( hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u); HYPRE_Int n_global = (HYPRE_Int) global_num_rows; HYPRE_Int first_index = (HYPRE_Int) first_ind; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_CSRMatrix *A_CSR; HYPRE_Int *A_CSR_i; HYPRE_Int *A_CSR_j; HYPRE_Real *A_CSR_data; hypre_Vector *f_vector; HYPRE_Real *f_vector_data; HYPRE_Real *A_mat; HYPRE_Real *b_vec; HYPRE_Int i, jj, column, relax_error = 0; /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ /* all processors are needed for these routines */ A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); if (num_rows) { A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = 
hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(HYPRE_Real, n_global * n_global, HYPRE_MEMORY_HOST); b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. *---------------------------------------------------------------*/ for (i = 0; i < n_global; i++) { for (jj = A_CSR_i[i]; jj < A_CSR_i[i + 1]; jj++) { column = A_CSR_j[jj]; A_mat[i * n_global + column] = A_CSR_data[jj]; } b_vec[i] = f_vector_data[i]; } hypre_gselim(A_mat, b_vec, n_global, relax_error); for (i = 0; i < num_rows; i++) { u_data[i] = b_vec[first_index + i]; } hypre_TFree(A_mat, HYPRE_MEMORY_HOST); hypre_TFree(b_vec, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } else { hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } return relax_error; } HYPRE_Int hypre_BoomerAMGRelax98GaussElimPivot( hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u); HYPRE_Int n_global = (HYPRE_Int) global_num_rows; HYPRE_Int first_index = (HYPRE_Int) first_ind; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_CSRMatrix *A_CSR; HYPRE_Int *A_CSR_i; HYPRE_Int *A_CSR_j; HYPRE_Real *A_CSR_data; hypre_Vector *f_vector; HYPRE_Real *f_vector_data; HYPRE_Real *A_mat; HYPRE_Real *b_vec; HYPRE_Int i, jj, column, relax_error = 0; HYPRE_Int info; HYPRE_Int one_i = 1; HYPRE_Int *piv; /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ /* all processors are needed for these routines */ A_CSR = 
hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); if (num_rows) { A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(HYPRE_Real, n_global * n_global, HYPRE_MEMORY_HOST); b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. *---------------------------------------------------------------*/ for (i = 0; i < n_global; i++) { for (jj = A_CSR_i[i]; jj < A_CSR_i[i + 1]; jj++) { /* need col major */ column = A_CSR_j[jj]; A_mat[i + n_global * column] = A_CSR_data[jj]; } b_vec[i] = f_vector_data[i]; } piv = hypre_CTAlloc(HYPRE_Int, n_global, HYPRE_MEMORY_HOST); /* write over A with LU */ hypre_dgetrf(&n_global, &n_global, A_mat, &n_global, piv, &info); /*now b_vec = inv(A)*b_vec */ hypre_dgetrs("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info); hypre_TFree(piv, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows; i++) { u_data[i] = b_vec[first_index + i]; } hypre_TFree(A_mat, HYPRE_MEMORY_HOST); hypre_TFree(b_vec, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } else { hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } return relax_error; } HYPRE_Int hypre_BoomerAMGRelaxKaczmarz( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int 
*A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *u_offd_data = NULL; HYPRE_Complex *u_buf_data = NULL; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, index, num_sends, start; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); u_buf_data = hypre_TAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, u_buf_data, u_offd_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST); } /* Forward local pass */ for (i = 0; i < num_rows; i++) { res = f_data[i]; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { res -= A_diag_data[j] * u_data[A_diag_j[j]]; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { res -= A_offd_data[j] * u_offd_data[A_offd_j[j]]; } res /= l1_norms[i]; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { u_data[A_diag_j[j]] += omega * res * A_diag_data[j]; } } /* Backward local pass */ for (i = num_rows - 1; i > -1; i--) { res = f_data[i]; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { res -= 
A_diag_data[j] * u_data[A_diag_j[j]]; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { res -= A_offd_data[j] * u_offd_data[A_offd_j[j]]; } res /= l1_norms[i]; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { u_data[A_diag_j[j]] += omega * res * A_diag_data[j]; } } hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelaxTwoStageGaussSeidelHost( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, HYPRE_Int num_inner_iters) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Complex *Vtemp_data = hypre_VectorData(Vtemp_local); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); HYPRE_Int i, k, jj, ii; HYPRE_Complex multiplier = 1.0; /* Need to check that EVERY diagonal is nonzero first. 
If any are, throw exception */ for (i = 0; i < num_rows; i++) { if (A_diag_data[A_diag_i[i]] == 0.0) { hypre_error_in_arg(1); } } hypre_ParCSRMatrixMatvecOutOfPlace(-relax_weight, A, u, relax_weight, f, Vtemp); for (i = 0; i < num_rows; i++) /* Run the smoother */ { // V = V/D Vtemp_data[i] /= A_diag_data[A_diag_i[i]]; // u = u + m*v u_data[i] += multiplier * Vtemp_data[i]; } // adjust for the alternating series multiplier *= -1.0; for (k = 0; k < num_inner_iters; ++k) { // By going from bottom to top, we can update Vtemp in place because // we're operating with the strict, lower triangular matrix for (i = num_rows - 1; i >= 0; i--) /* Run the smoother */ { // spmv for the row first HYPRE_Complex res = 0.0; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { ii = A_diag_j[jj]; if (ii < i) { res += A_diag_data[jj] * Vtemp_data[ii]; } } // diagonal scaling has to come after the spmv accumulation. It's a row scaling // not column Vtemp_data[i] = res / A_diag_data[A_diag_i[i]]; u_data[i] += multiplier * Vtemp_data[i]; } // adjust for the alternating series multiplier *= -1.0; } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelax11TwoStageGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 1); } else #endif { hypre_BoomerAMGRelaxTwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp, 1); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelax12TwoStageGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, 
HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 2); } else #endif { hypre_BoomerAMGRelaxTwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp, 2); } return hypre_error_flag; }
pzgstrs.c
/*! \file Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from U.S. Dept. of Energy) All rights reserved. The source code is distributed under BSD license, see the file License.txt at the top-level directory. */ /*! @file * \brief Solves a system of distributed linear equations A*X = B with a * general N-by-N matrix A using the LU factors computed previously. * * <pre> * -- Distributed SuperLU routine (version 6.1) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 15, 2008 * September 18, 2018 version 6.0 * February 8, 2019 version 6.1.1 * </pre> */ #include <math.h> #include "superlu_zdefs.h" #ifndef CACHELINE #define CACHELINE 64 /* bytes, Xeon Phi KNL, Cori haswell, Edision */ #endif /* * Sketch of the algorithm for L-solve: * ======================= * * Self-scheduling loop: * * while ( not finished ) { .. use message counter to control * * reveive a message; * * if ( message is Xk ) { * perform local block modifications into lsum[]; * lsum[i] -= L_i,k * X[k] * if all local updates done, Isend lsum[] to diagonal process; * * } else if ( message is LSUM ) { .. this must be a diagonal process * accumulate LSUM; * if ( all LSUM are received ) { * perform triangular solve for Xi; * Isend Xi down to the current process column; * perform local block modifications into lsum[]; * } * } * } * * * Auxiliary data structures: lsum[] / ilsum (pointer to lsum array) * ======================= * * lsum[] array (local) * + lsum has "nrhs" columns, row-wise is partitioned by supernodes * + stored by row blocks, column wise storage within a row block * + prepend a header recording the global block number. 
* * lsum[] ilsum[nsupers + 1] * * ----- * | | | <- header of size 2 --- * --------- <--------------------| | * | | | | | --- * | | | | | |-----------| | * | | | | | | --- * --------- | |-------| | * | | | <- header | | --- * --------- <--------| | |----| | * | | | | | | | --- * | | | | | | | * | | | | | | | * --------- | | * | | | <- header | | * --------- <------------| | * | | | | | | * | | | | | | * | | | | | | * --------- <---------------| */ /*#define ISEND_IRECV*/ /* * Function prototypes */ #ifdef _CRAY fortran void CTRSM(_fcd, _fcd, _fcd, _fcd, int*, int*, doublecomplex*, doublecomplex*, int*, doublecomplex*, int*); _fcd ftcs1; _fcd ftcs2; _fcd ftcs3; #endif /*! \brief * * <pre> * Purpose * ======= * Re-distribute B on the diagonal processes of the 2D process mesh. * * Note * ==== * This routine can only be called after the routine pxgstrs_init(), * in which the structures of the send and receive buffers are set up. * * Arguments * ========= * * B (input) doublecomplex* * The distributed right-hand side matrix of the possibly * equilibrated system. * * m_loc (input) int (local) * The local row dimension of matrix B. * * nrhs (input) int (global) * Number of right-hand sides. * * ldb (input) int (local) * Leading dimension of matrix B. * * fst_row (input) int (global) * The row number of B's first row in the global matrix. * * ilsum (input) int* (global) * Starting position of each supernode in a full array. * * x (output) doublecomplex* * The solution vector. It is valid only on the diagonal processes. * * ScalePermstruct (input) ScalePermstruct_t* * The data structure to store the scaling and permutation vectors * describing the transformations performed to the original matrix A. * * grid (input) gridinfo_t* * The 2D process mesh. * * SOLVEstruct (input) SOLVEstruct_t* * Contains the information for the communication during the * solution phase. 
* * Return value * ============ * </pre> */ int_t pzReDistribute_B_to_X(doublecomplex *B, int_t m_loc, int nrhs, int_t ldb, int_t fst_row, int_t *ilsum, doublecomplex *x, ScalePermstruct_t *ScalePermstruct, Glu_persist_t *Glu_persist, gridinfo_t *grid, SOLVEstruct_t *SOLVEstruct) { int *SendCnt, *SendCnt_nrhs, *RecvCnt, *RecvCnt_nrhs; int *sdispls, *sdispls_nrhs, *rdispls, *rdispls_nrhs; int *ptr_to_ibuf, *ptr_to_dbuf; int_t *perm_r, *perm_c; /* row and column permutation vectors */ int_t *send_ibuf, *recv_ibuf; doublecomplex *send_dbuf, *recv_dbuf; int_t *xsup, *supno; int_t i, ii, irow, gbi, j, jj, k, knsupc, l, lk, nbrow; int p, procs; pxgstrs_comm_t *gstrs_comm = SOLVEstruct->gstrs_comm; MPI_Request req_i, req_d, *req_send, *req_recv; MPI_Status status, *status_send, *status_recv; int Nreq_recv, Nreq_send, pp, pps, ppr; double t; #if ( DEBUGlevel>=1 ) CHECK_MALLOC(grid->iam, "Enter pzReDistribute_B_to_X()"); #endif /* ------------------------------------------------------------ INITIALIZATION. ------------------------------------------------------------*/ perm_r = ScalePermstruct->perm_r; perm_c = ScalePermstruct->perm_c; procs = grid->nprow * grid->npcol; xsup = Glu_persist->xsup; supno = Glu_persist->supno; SendCnt = gstrs_comm->B_to_X_SendCnt; SendCnt_nrhs = gstrs_comm->B_to_X_SendCnt + procs; RecvCnt = gstrs_comm->B_to_X_SendCnt + 2*procs; RecvCnt_nrhs = gstrs_comm->B_to_X_SendCnt + 3*procs; sdispls = gstrs_comm->B_to_X_SendCnt + 4*procs; sdispls_nrhs = gstrs_comm->B_to_X_SendCnt + 5*procs; rdispls = gstrs_comm->B_to_X_SendCnt + 6*procs; rdispls_nrhs = gstrs_comm->B_to_X_SendCnt + 7*procs; ptr_to_ibuf = gstrs_comm->ptr_to_ibuf; ptr_to_dbuf = gstrs_comm->ptr_to_dbuf; /* ------------------------------------------------------------ NOW COMMUNICATE THE ACTUAL DATA. 
------------------------------------------------------------*/ if(procs==1){ // faster memory copy when procs=1 #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { // t = SuperLU_timer_(); #ifdef _OPENMP #pragma omp taskloop private (i,l,irow,k,j,knsupc) untied #endif for (i = 0; i < m_loc; ++i) { irow = perm_c[perm_r[i+fst_row]]; /* Row number in Pc*Pr*B */ k = BlockNum( irow ); knsupc = SuperSize( k ); l = X_BLK( k ); x[l - XK_H].r = k; /* Block number prepended in the header. */ x[l - XK_H].i = 0; irow = irow - FstBlockC(k); /* Relative row number in X-block */ RHS_ITERATE(j) { x[l + irow + j*knsupc] = B[i + j*ldb]; } } } } }else{ k = sdispls[procs-1] + SendCnt[procs-1]; /* Total number of sends */ l = rdispls[procs-1] + RecvCnt[procs-1]; /* Total number of receives */ if ( !(send_ibuf = intMalloc_dist(k + l)) ) ABORT("Malloc fails for send_ibuf[]."); recv_ibuf = send_ibuf + k; if ( !(send_dbuf = doublecomplexMalloc_dist((k + l)* (size_t)nrhs)) ) ABORT("Malloc fails for send_dbuf[]."); recv_dbuf = send_dbuf + k * nrhs; if ( !(req_send = (MPI_Request*) SUPERLU_MALLOC(procs*sizeof(MPI_Request))) ) ABORT("Malloc fails for req_send[]."); if ( !(req_recv = (MPI_Request*) SUPERLU_MALLOC(procs*sizeof(MPI_Request))) ) ABORT("Malloc fails for req_recv[]."); if ( !(status_send = (MPI_Status*) SUPERLU_MALLOC(procs*sizeof(MPI_Status))) ) ABORT("Malloc fails for status_send[]."); if ( !(status_recv = (MPI_Status*) SUPERLU_MALLOC(procs*sizeof(MPI_Status))) ) ABORT("Malloc fails for status_recv[]."); for (p = 0; p < procs; ++p) { ptr_to_ibuf[p] = sdispls[p]; ptr_to_dbuf[p] = sdispls[p] * nrhs; } /* Copy the row indices and values to the send buffer. 
*/ // t = SuperLU_timer_(); for (i = 0, l = fst_row; i < m_loc; ++i, ++l) { irow = perm_c[perm_r[l]]; /* Row number in Pc*Pr*B */ gbi = BlockNum( irow ); p = PNUM( PROW(gbi,grid), PCOL(gbi,grid), grid ); /* Diagonal process */ k = ptr_to_ibuf[p]; send_ibuf[k] = irow; ++ptr_to_ibuf[p]; k = ptr_to_dbuf[p]; RHS_ITERATE(j) { /* RHS is stored in row major in the buffer. */ send_dbuf[k++] = B[i + j*ldb]; } ptr_to_dbuf[p] += nrhs; } // t = SuperLU_timer_() - t; // printf(".. copy to send buffer time\t%8.4f\n", t); #if 0 #if 1 /* Communicate the (permuted) row indices. */ MPI_Alltoallv(send_ibuf, SendCnt, sdispls, mpi_int_t, recv_ibuf, RecvCnt, rdispls, mpi_int_t, grid->comm); /* Communicate the numerical values. */ MPI_Alltoallv(send_dbuf, SendCnt_nrhs, sdispls_nrhs, SuperLU_MPI_DOUBLE_COMPLEX, recv_dbuf, RecvCnt_nrhs, rdispls_nrhs, SuperLU_MPI_DOUBLE_COMPLEX, grid->comm); #else /* Communicate the (permuted) row indices. */ MPI_Ialltoallv(send_ibuf, SendCnt, sdispls, mpi_int_t, recv_ibuf, RecvCnt, rdispls, mpi_int_t, grid->comm, &req_i); /* Communicate the numerical values. 
*/ MPI_Ialltoallv(send_dbuf, SendCnt_nrhs, sdispls_nrhs, SuperLU_MPI_DOUBLE_COMPLEX, recv_dbuf, RecvCnt_nrhs, rdispls_nrhs, SuperLU_MPI_DOUBLE_COMPLEX, grid->comm, &req_d); MPI_Wait(&req_i,&status); MPI_Wait(&req_d,&status); #endif #endif MPI_Barrier( grid->comm ); Nreq_send=0; Nreq_recv=0; for (pp=0;pp<procs;pp++){ pps = grid->iam+1+pp; if(pps>=procs)pps-=procs; if(pps<0)pps+=procs; ppr = grid->iam-1+pp; if(ppr>=procs)ppr-=procs; if(ppr<0)ppr+=procs; if(SendCnt[pps]>0){ MPI_Isend(&send_ibuf[sdispls[pps]], SendCnt[pps], mpi_int_t, pps, 0, grid->comm, &req_send[Nreq_send] ); Nreq_send++; } if(RecvCnt[ppr]>0){ MPI_Irecv(&recv_ibuf[rdispls[ppr]], RecvCnt[ppr], mpi_int_t, ppr, 0, grid->comm, &req_recv[Nreq_recv] ); Nreq_recv++; } } if(Nreq_send>0)MPI_Waitall(Nreq_send,req_send,status_send); if(Nreq_recv>0)MPI_Waitall(Nreq_recv,req_recv,status_recv); Nreq_send=0; Nreq_recv=0; for (pp=0;pp<procs;pp++){ pps = grid->iam+1+pp; if(pps>=procs)pps-=procs; if(pps<0)pps+=procs; ppr = grid->iam-1+pp; if(ppr>=procs)ppr-=procs; if(ppr<0)ppr+=procs; if(SendCnt_nrhs[pps]>0){ MPI_Isend(&send_dbuf[sdispls_nrhs[pps]], SendCnt_nrhs[pps], SuperLU_MPI_DOUBLE_COMPLEX, pps, 1, grid->comm, &req_send[Nreq_send] ); Nreq_send++; } if(RecvCnt_nrhs[ppr]>0){ MPI_Irecv(&recv_dbuf[rdispls_nrhs[ppr]], RecvCnt_nrhs[ppr], SuperLU_MPI_DOUBLE_COMPLEX, ppr, 1, grid->comm, &req_recv[Nreq_recv] ); Nreq_recv++; } } if(Nreq_send>0)MPI_Waitall(Nreq_send,req_send,status_send); if(Nreq_recv>0)MPI_Waitall(Nreq_recv,req_recv,status_recv); /* ------------------------------------------------------------ Copy buffer into X on the diagonal processes. ------------------------------------------------------------*/ // t = SuperLU_timer_(); ii = 0; for (p = 0; p < procs; ++p) { jj = rdispls_nrhs[p]; for (i = 0; i < RecvCnt[p]; ++i) { /* Only the diagonal processes do this; the off-diagonal processes have 0 RecvCnt. */ irow = recv_ibuf[ii]; /* The permuted row index. 
*/
	    k = BlockNum( irow );
	    knsupc = SuperSize( k );
	    lk = LBi( k, grid );  /* Local block number. */
	    l = X_BLK( lk );
	    x[l - XK_H].r = k;    /* Block number prepended in the header. */
	    x[l - XK_H].i = 0;
	    irow = irow - FstBlockC(k); /* Relative row number in X-block */
	    RHS_ITERATE(j) {
		x[l + irow + j*knsupc] = recv_dbuf[jj++];
	    }
	    ++ii;
	}
    }

    // t = SuperLU_timer_() - t;
    // printf(".. copy to x time\t%8.4f\n", t);

    SUPERLU_FREE(send_ibuf);
    SUPERLU_FREE(send_dbuf);
    SUPERLU_FREE(req_send);
    SUPERLU_FREE(req_recv);
    SUPERLU_FREE(status_send);
    SUPERLU_FREE(status_recv);
    }

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(grid->iam, "Exit pzReDistribute_B_to_X()");
#endif
    return 0;
} /* pzReDistribute_B_to_X */

/*! \brief
 *
 * <pre>
 * Purpose
 * =======
 *   Re-distribute X on the diagonal processes to B distributed on all
 *   the processes.
 *
 * Note
 * ====
 *   This routine can only be called after the routine pxgstrs_init(),
 *   in which the structures of the send and receive buffers are set up.
 * </pre>
 */

int_t
pzReDistribute_X_to_B(int_t n, doublecomplex *B, int_t m_loc, int_t ldb,
		      int_t fst_row, int_t nrhs, doublecomplex *x, int_t *ilsum,
		      ScalePermstruct_t *ScalePermstruct,
		      Glu_persist_t *Glu_persist, gridinfo_t *grid,
		      SOLVEstruct_t *SOLVEstruct)
{
    int_t i, ii, irow, j, jj, k, knsupc, nsupers, l, lk;
    int_t *xsup, *supno;
    int *SendCnt, *SendCnt_nrhs, *RecvCnt, *RecvCnt_nrhs;
    int *sdispls, *rdispls, *sdispls_nrhs, *rdispls_nrhs;
    int *ptr_to_ibuf, *ptr_to_dbuf;
    int_t *send_ibuf, *recv_ibuf;
    doublecomplex *send_dbuf, *recv_dbuf;
    int_t *row_to_proc = SOLVEstruct->row_to_proc; /* row-process mapping */
    pxgstrs_comm_t *gstrs_comm = SOLVEstruct->gstrs_comm;
    int  iam, p, q, pkk, procs;
    int_t num_diag_procs, *diag_procs;
    MPI_Request req_i, req_d, *req_send, *req_recv;
    MPI_Status status, *status_send, *status_recv;
    int Nreq_recv, Nreq_send, pp, pps, ppr;

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(grid->iam, "Enter pzReDistribute_X_to_B()");
#endif

    /* ------------------------------------------------------------
       INITIALIZATION.
       ------------------------------------------------------------*/
    xsup = Glu_persist->xsup;
    supno = Glu_persist->supno;
    nsupers = Glu_persist->supno[n-1] + 1;
    iam = grid->iam;
    procs = grid->nprow * grid->npcol;

    /* The eight count/displacement arrays are packed back-to-back in
       one X_to_B_SendCnt allocation of length 8*procs; each slice below
       is one procs-long segment of it. */
    SendCnt      = gstrs_comm->X_to_B_SendCnt;
    SendCnt_nrhs = gstrs_comm->X_to_B_SendCnt +   procs;
    RecvCnt      = gstrs_comm->X_to_B_SendCnt + 2*procs;
    RecvCnt_nrhs = gstrs_comm->X_to_B_SendCnt + 3*procs;
    sdispls      = gstrs_comm->X_to_B_SendCnt + 4*procs;
    sdispls_nrhs = gstrs_comm->X_to_B_SendCnt + 5*procs;
    rdispls      = gstrs_comm->X_to_B_SendCnt + 6*procs;
    rdispls_nrhs = gstrs_comm->X_to_B_SendCnt + 7*procs;
    ptr_to_ibuf  = gstrs_comm->ptr_to_ibuf;
    ptr_to_dbuf  = gstrs_comm->ptr_to_dbuf;

    if(procs==1){ //faster memory copy when procs=1

#ifdef _OPENMP
#pragma omp parallel default (shared)
#endif
	{
#ifdef _OPENMP
#pragma omp master
#endif
	    {
		// t = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp	taskloop private (k,knsupc,lk,irow,l,i,j) untied
#endif
		/* Single-process case: X and B live on the same rank, so
		   copy each supernode block of x straight into B. */
		for (k = 0; k < nsupers; k++) {
		    knsupc = SuperSize( k );
		    lk = LBi( k, grid ); /* Local block number */
		    irow = FstBlockC( k );
		    l = X_BLK( lk );
		    for (i = 0; i < knsupc; ++i) {
			RHS_ITERATE(j) { /* RHS is stored in row major in the buffer. */
			    B[irow-fst_row +i + j*ldb] = x[l + i + j*knsupc];
			}
		    }
		}
	    }
	}
    }else{
	k = sdispls[procs-1] + SendCnt[procs-1]; /* Total number of sends */
	l = rdispls[procs-1] + RecvCnt[procs-1]; /* Total number of receives */

	/* send_ibuf is one allocation holding both halves: the first k
	   entries are the send side, the next l entries the receive side.
	   send_dbuf/recv_dbuf are laid out the same way, scaled by nrhs. */
	if ( !(send_ibuf = intMalloc_dist(k + l)) )
	    ABORT("Malloc fails for send_ibuf[].");
	recv_ibuf = send_ibuf + k;
	if ( !(send_dbuf = doublecomplexMalloc_dist((k + l)*nrhs)) )
	    ABORT("Malloc fails for send_dbuf[].");
	if ( !(req_send = (MPI_Request*) SUPERLU_MALLOC(procs*sizeof(MPI_Request))) )
	    ABORT("Malloc fails for req_send[].");
	if ( !(req_recv = (MPI_Request*) SUPERLU_MALLOC(procs*sizeof(MPI_Request))) )
	    ABORT("Malloc fails for req_recv[].");
	if ( !(status_send = (MPI_Status*) SUPERLU_MALLOC(procs*sizeof(MPI_Status))) )
	    ABORT("Malloc fails for status_send[].");
	if ( !(status_recv = (MPI_Status*) SUPERLU_MALLOC(procs*sizeof(MPI_Status))) )
	    ABORT("Malloc fails for status_recv[].");
	recv_dbuf = send_dbuf + k * nrhs;

	/* Reset the per-destination fill cursors to the start of each
	   destination's slot in the send buffers. */
	for (p = 0; p < procs; ++p) {
	    ptr_to_ibuf[p] = sdispls[p];
	    ptr_to_dbuf[p] = sdispls_nrhs[p];
	}
	num_diag_procs = SOLVEstruct->num_diag_procs;
	diag_procs = SOLVEstruct->diag_procs;

	/* Pack x into the send buffers: only diagonal processes own X
	   blocks, and process p of the diagonal set owns supernodes
	   p, p+num_diag_procs, p+2*num_diag_procs, ... */
	for (p = 0; p < num_diag_procs; ++p) { /* For all diagonal processes. */
	    pkk = diag_procs[p];
	    if ( iam == pkk ) {
		for (k = p; k < nsupers; k += num_diag_procs) {
		    knsupc = SuperSize( k );
		    lk = LBi( k, grid ); /* Local block number */
		    irow = FstBlockC( k );
		    l = X_BLK( lk );
		    for (i = 0; i < knsupc; ++i) {
#if 0
			ii = inv_perm_c[irow]; /* Apply X <== Pc'*Y */
#else
			ii = irow;
#endif
			q = row_to_proc[ii]; /* destination rank for this row */
			jj = ptr_to_ibuf[q];
			send_ibuf[jj] = ii;
			jj = ptr_to_dbuf[q];
			RHS_ITERATE(j) { /* RHS stored in row major in buffer. */
			    send_dbuf[jj++] = x[l + i + j*knsupc];
			}
			++ptr_to_ibuf[q];
			ptr_to_dbuf[q] += nrhs;
			++irow;
		    }
		}
	    }
	}

	/* ------------------------------------------------------------
	   COMMUNICATE THE (PERMUTED) ROW INDICES AND NUMERICAL VALUES.
	   ------------------------------------------------------------*/
#if 0
#if 1
	MPI_Alltoallv(send_ibuf, SendCnt, sdispls, mpi_int_t,
		      recv_ibuf, RecvCnt, rdispls, mpi_int_t, grid->comm);
	MPI_Alltoallv(send_dbuf, SendCnt_nrhs, sdispls_nrhs,SuperLU_MPI_DOUBLE_COMPLEX,
		      recv_dbuf, RecvCnt_nrhs, rdispls_nrhs, SuperLU_MPI_DOUBLE_COMPLEX,
		      grid->comm);
#else
	MPI_Ialltoallv(send_ibuf, SendCnt, sdispls, mpi_int_t,
		       recv_ibuf, RecvCnt, rdispls, mpi_int_t, grid->comm,&req_i);
	MPI_Ialltoallv(send_dbuf, SendCnt_nrhs, sdispls_nrhs, SuperLU_MPI_DOUBLE_COMPLEX,
		       recv_dbuf, RecvCnt_nrhs, rdispls_nrhs, SuperLU_MPI_DOUBLE_COMPLEX,
		       grid->comm,&req_d);
	MPI_Wait(&req_i,&status);
	MPI_Wait(&req_d,&status);
#endif
#endif

	/* Hand-rolled all-to-all replacing the (disabled) MPI_Alltoallv
	   above: each iteration pp posts one send to rank (iam+1+pp) mod
	   procs and one receive from rank (iam-1+pp) mod procs, i.e. a
	   ring-offset schedule to spread traffic; all requests are then
	   completed with Waitall.  Tag 0 carries the row indices. */
	MPI_Barrier( grid->comm );
	Nreq_send=0;
	Nreq_recv=0;
	for (pp=0;pp<procs;pp++){
	    pps = grid->iam+1+pp;
	    if(pps>=procs)pps-=procs;
	    if(pps<0)pps+=procs;
	    ppr = grid->iam-1+pp;
	    if(ppr>=procs)ppr-=procs;
	    if(ppr<0)ppr+=procs;
	    if(SendCnt[pps]>0){
		MPI_Isend(&send_ibuf[sdispls[pps]], SendCnt[pps], mpi_int_t, pps, 0, grid->comm, &req_send[Nreq_send] );
		Nreq_send++;
	    }
	    if(RecvCnt[ppr]>0){
		MPI_Irecv(&recv_ibuf[rdispls[ppr]], RecvCnt[ppr], mpi_int_t, ppr, 0, grid->comm, &req_recv[Nreq_recv] );
		Nreq_recv++;
	    }
	}
	if(Nreq_send>0)MPI_Waitall(Nreq_send,req_send,status_send);
	if(Nreq_recv>0)MPI_Waitall(Nreq_recv,req_recv,status_recv);
	// MPI_Barrier( grid->comm );

	/* Second exchange round, same schedule, tag 1: the numerical
	   values (nrhs entries per row). */
	Nreq_send=0;
	Nreq_recv=0;
	for (pp=0;pp<procs;pp++){
	    pps = grid->iam+1+pp;
	    if(pps>=procs)pps-=procs;
	    if(pps<0)pps+=procs;
	    ppr = grid->iam-1+pp;
	    if(ppr>=procs)ppr-=procs;
	    if(ppr<0)ppr+=procs;
	    if(SendCnt_nrhs[pps]>0){
		MPI_Isend(&send_dbuf[sdispls_nrhs[pps]], SendCnt_nrhs[pps], SuperLU_MPI_DOUBLE_COMPLEX, pps, 1, grid->comm, &req_send[Nreq_send] );
		Nreq_send++;
	    }
	    if(RecvCnt_nrhs[ppr]>0){
		MPI_Irecv(&recv_dbuf[rdispls_nrhs[ppr]], RecvCnt_nrhs[ppr], SuperLU_MPI_DOUBLE_COMPLEX, ppr, 1, grid->comm, &req_recv[Nreq_recv] );
		Nreq_recv++;
	    }
	}
	if(Nreq_send>0)MPI_Waitall(Nreq_send,req_send,status_send);
	if(Nreq_recv>0)MPI_Waitall(Nreq_recv,req_recv,status_recv);
	// MPI_Barrier( grid->comm );

	/* ------------------------------------------------------------
	   COPY THE BUFFER INTO B.
	   ------------------------------------------------------------*/
	for (i = 0, k = 0; i < m_loc; ++i) {
	    irow = recv_ibuf[i];
	    irow -= fst_row; /* Relative row number */
	    RHS_ITERATE(j) { /* RHS is stored in row major in the buffer. */
		B[irow + j*ldb] = recv_dbuf[k++];
	    }
	}

	/* recv_ibuf/recv_dbuf are tails of these allocations, so only the
	   two base pointers are freed. */
	SUPERLU_FREE(send_ibuf);
	SUPERLU_FREE(send_dbuf);
	SUPERLU_FREE(req_send);
	SUPERLU_FREE(req_recv);
	SUPERLU_FREE(status_send);
	SUPERLU_FREE(status_recv);
    }

#if ( DEBUGlevel>=1 )
    CHECK_MALLOC(grid->iam, "Exit pzReDistribute_X_to_B()");
#endif
    return 0;

} /* pzReDistribute_X_to_B */

/*! \brief
 *
 * <pre>
 * Purpose
 * =======
 *   Compute the inverse of the diagonal blocks of the L and U
 *   triangular matrices.
 *
 * Note
 * ====
 *   Requires LAPACK (SLU_HAVE_LAPACK); otherwise this routine is a
 *   no-op.  *info is not written by this routine.
 * </pre>
 */
void
pzCompute_Diag_Inv(int_t n, LUstruct_t *LUstruct,gridinfo_t *grid,
		   SuperLUStat_t *stat, int *info)
{
#ifdef SLU_HAVE_LAPACK
    Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
    LocalLU_t *Llu = LUstruct->Llu;

    doublecomplex *lusup;
    doublecomplex *recvbuf, *tempv;
    doublecomplex *Linv;/* Inverse of diagonal block */
    doublecomplex *Uinv;/* Inverse of diagonal block */

    int_t  kcol, krow, mycol, myrow;
    int_t  i, ii, il, j, jj, k, lb, ljb, lk, lptr, luptr;
    int_t  nb, nlb,nlb_nodiag, nub, nsupers;
    int_t  *xsup, *supno, *lsub, *usub;
    int_t  *ilsum;    /* Starting position of each supernode in lsum (LOCAL)*/
    int    Pc, Pr, iam;
    int    knsupc, nsupr;
    int    ldalsum;   /* Number of lsum entries locally owned. */
    int    maxrecvsz, p, pi;
    int_t  **Lrowind_bc_ptr;
    doublecomplex **Lnzval_bc_ptr;
    doublecomplex **Linv_bc_ptr;
    doublecomplex **Uinv_bc_ptr;
    int INFO;
    double t;

    doublecomplex one = {1.0, 0.0};
    doublecomplex zero = {0.0, 0.0};

#if ( PROFlevel>=1 )
    t = SuperLU_timer_();
#endif

#if ( PRNTlevel>=2 )
    if ( grid->iam==0 ) {
	printf("computing inverse of diagonal blocks...\n");
	fflush(stdout);
    }
#endif

    /*
     * Initialization.
     */
    iam = grid->iam;
    Pc = grid->npcol;
    Pr = grid->nprow;
    myrow = MYROW( iam, grid );
    mycol = MYCOL( iam, grid );
    xsup = Glu_persist->xsup;
    supno = Glu_persist->supno;
    nsupers = supno[n-1] + 1;
    Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
    Linv_bc_ptr = Llu->Linv_bc_ptr;
    Uinv_bc_ptr = Llu->Uinv_bc_ptr;
    Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
    nlb = CEILING( nsupers, Pr ); /* Number of local block rows. */

    Llu->inv = 1; /* Record that the inverses are available to the solves. */

    /*---------------------------------------------------
     * Compute inverse of L(lk,lk).
     *---------------------------------------------------*/

    for (k = 0; k < nsupers; ++k) {
	krow = PROW( k, grid );
	if ( myrow == krow ) {
	    lk = LBi( k, grid );	/* local block number */
	    kcol = PCOL( k, grid );
	    if ( mycol == kcol ) { /* diagonal process */

		lk = LBj( k, grid ); /* Local block number, column-wise. */
		lsub = Lrowind_bc_ptr[lk];
		lusup = Lnzval_bc_ptr[lk];
		Linv = Linv_bc_ptr[lk];
		Uinv = Uinv_bc_ptr[lk];
		nsupr = lsub[1];
		knsupc = SuperSize( k );

		/* Clear both knsupc-by-knsupc work blocks. */
		for (j=0 ; j<knsupc; j++){
		    for (i=0 ; i<knsupc; i++){
			Linv[j*knsupc+i] = zero;
			Uinv[j*knsupc+i] = zero;
		    }
		}

		/* Split the dense diagonal panel (leading dimension nsupr)
		   into the unit-lower part (Linv, strict lower triangle
		   plus unit diagonal) and the upper part (Uinv, including
		   the diagonal), both with leading dimension knsupc. */
		for (j=0 ; j<knsupc; j++){
		    Linv[j*knsupc+j] = one;
		    for (i=j+1 ; i<knsupc; i++){
			z_copy(&Linv[j*knsupc+i],&lusup[j*nsupr+i]);
		    }
		    for (i=0 ; i<j+1; i++){
			z_copy(&Uinv[j*knsupc+i],&lusup[j*nsupr+i]);
		    }
		}

		/* Triangular inversion in place: Linv is lower/unit-diag,
		   Uinv is upper/non-unit.
		   NOTE(review): INFO is not checked after either call, so
		   a singular diagonal block goes undetected here — confirm
		   whether callers rely on that. */
		ztrtri_("L","U",&knsupc,Linv,&knsupc,&INFO);
		ztrtri_("U","N",&knsupc,Uinv,&knsupc,&INFO);

	    } /* end if (mycol === kcol) */
	} /* end if (myrow === krow) */
    } /* end fo k = ... nsupers */

#if ( PROFlevel>=1 )
    if( grid->iam==0 ) {
	t = SuperLU_timer_() - t;
	printf(".. L-diag_inv time\t%10.5f\n", t);
	fflush(stdout);
    }
#endif

    return;
#endif /* SLU_HAVE_LAPACK */
}


/*! \brief
 *
 * <pre>
 * Purpose
 * =======
 *
 * PZGSTRS solves a system of distributed linear equations
 * A*X = B with a general N-by-N matrix A using the LU factorization
 * computed by PZGSTRF.
* If the equilibration, and row and column permutations were performed, * the LU factorization was performed for A1 where * A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U * and the linear system solved is * A1 * Y = Pc*Pr*B1, where B was overwritten by B1 = diag(R)*B, and * the permutation to B1 by Pc*Pr is applied internally in this routine. * * Arguments * ========= * * n (input) int (global) * The order of the system of linear equations. * * LUstruct (input) LUstruct_t* * The distributed data structures storing L and U factors. * The L and U factors are obtained from PZGSTRF for * the possibly scaled and permuted matrix A. * See superlu_zdefs.h for the definition of 'LUstruct_t'. * A may be scaled and permuted into A1, so that * A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U * * grid (input) gridinfo_t* * The 2D process mesh. It contains the MPI communicator, the number * of process rows (NPROW), the number of process columns (NPCOL), * and my process rank. It is an input argument to all the * parallel routines. * Grid can be initialized by subroutine SUPERLU_GRIDINIT. * See superlu_defs.h for the definition of 'gridinfo_t'. * * B (input/output) doublecomplex* * On entry, the distributed right-hand side matrix of the possibly * equilibrated system. That is, B may be overwritten by diag(R)*B. * On exit, the distributed solution matrix Y of the possibly * equilibrated system if info = 0, where Y = Pc*diag(C)^(-1)*X, * and X is the solution of the original system. * * m_loc (input) int (local) * The local row dimension of matrix B. * * fst_row (input) int (global) * The row number of B's first row in the global matrix. * * ldb (input) int (local) * The leading dimension of matrix B. * * nrhs (input) int (global) * Number of right-hand sides. * * SOLVEstruct (input) SOLVEstruct_t* (global) * Contains the information for the communication during the * solution phase. * * stat (output) SuperLUStat_t* * Record the statistics about the triangular solves. 
* See util.h for the definition of 'SuperLUStat_t'. * * info (output) int* * = 0: successful exit * < 0: if info = -i, the i-th argument had an illegal value * </pre> */ void pzgstrs(int_t n, LUstruct_t *LUstruct, ScalePermstruct_t *ScalePermstruct, gridinfo_t *grid, doublecomplex *B, int_t m_loc, int_t fst_row, int_t ldb, int nrhs, SOLVEstruct_t *SOLVEstruct, SuperLUStat_t *stat, int *info) { Glu_persist_t *Glu_persist = LUstruct->Glu_persist; LocalLU_t *Llu = LUstruct->Llu; doublecomplex alpha = {1.0, 0.0}; doublecomplex beta = {0.0, 0.0}; doublecomplex zero = {0.0, 0.0}; doublecomplex *lsum; /* Local running sum of the updates to B-components */ doublecomplex *x; /* X component at step k. */ /* NOTE: x and lsum are of same size. */ doublecomplex *lusup, *dest; doublecomplex *recvbuf, *recvbuf_on, *tempv, *recvbufall, *recvbuf_BC_fwd, *recvbuf0, *xin; doublecomplex *rtemp, *rtemp_loc; /* Result of full matrix-vector multiply. */ doublecomplex *Linv; /* Inverse of diagonal block */ doublecomplex *Uinv; /* Inverse of diagonal block */ int *ipiv; int_t *leaf_send; int_t nleaf_send, nleaf_send_tmp; int_t *root_send; int_t nroot_send, nroot_send_tmp; int_t **Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; /*-- Data structures used for broadcast and reduction trees. --*/ BcTree *LBtree_ptr = Llu->LBtree_ptr; RdTree *LRtree_ptr = Llu->LRtree_ptr; BcTree *UBtree_ptr = Llu->UBtree_ptr; RdTree *URtree_ptr = Llu->URtree_ptr; int_t *Urbs1; /* Number of row blocks in each block column of U. */ int_t *Urbs = Llu->Urbs; /* Number of row blocks in each block column of U. 
*/ Ucb_indptr_t **Ucb_indptr = Llu->Ucb_indptr;/* Vertical linked list pointing to Uindex[] */ int_t **Ucb_valptr = Llu->Ucb_valptr; /* Vertical linked list pointing to Unzval[] */ int_t kcol, krow, mycol, myrow; int_t i, ii, il, j, jj, k, kk, lb, ljb, lk, lib, lptr, luptr, gb, nn; int_t nb, nlb,nlb_nodiag, nub, nsupers, nsupers_j, nsupers_i,maxsuper; int_t *xsup, *supno, *lsub, *usub; int_t *ilsum; /* Starting position of each supernode in lsum (LOCAL)*/ int Pc, Pr, iam; int knsupc, nsupr, nprobe; int nbtree, nrtree, outcount; int ldalsum; /* Number of lsum entries locally owned. */ int maxrecvsz, p, pi; int_t **Lrowind_bc_ptr; doublecomplex **Lnzval_bc_ptr; doublecomplex **Linv_bc_ptr; doublecomplex **Uinv_bc_ptr; doublecomplex sum; MPI_Status status,status_on,statusx,statuslsum; pxgstrs_comm_t *gstrs_comm = SOLVEstruct->gstrs_comm; SuperLUStat_t **stat_loc; double tmax; /*-- Counts used for L-solve --*/ int_t *fmod; /* Modification count for L-solve -- Count the number of local block products to be summed into lsum[lk]. */ int_t fmod_tmp; int_t **fsendx_plist = Llu->fsendx_plist; int_t nfrecvx = Llu->nfrecvx; /* Number of X components to be recv'd. */ int_t nfrecvx_buf=0; int_t *frecv; /* Count of lsum[lk] contributions to be received from processes in this row. It is only valid on the diagonal processes. */ int_t frecv_tmp; int_t nfrecvmod = 0; /* Count of total modifications to be recv'd. */ int_t nfrecv = 0; /* Count of total messages to be recv'd. */ int_t nbrecv = 0; /* Count of total messages to be recv'd. */ int_t nleaf = 0, nroot = 0; int_t nleaftmp = 0, nroottmp = 0; int_t msgsize; /*-- Counts used for U-solve --*/ int_t *bmod; /* Modification count for U-solve. */ int_t bmod_tmp; int_t **bsendx_plist = Llu->bsendx_plist; int_t nbrecvx = Llu->nbrecvx; /* Number of X components to be recv'd. */ int_t nbrecvx_buf=0; int_t *brecv; /* Count of modifications to be recv'd from processes in this row. 
*/ int_t nbrecvmod = 0; /* Count of total modifications to be recv'd. */ int_t flagx,flaglsum,flag; int_t *LBTree_active, *LRTree_active, *LBTree_finish, *LRTree_finish, *leafsups, *rootsups; int_t TAG; double t1_sol, t2_sol, t; #if ( DEBUGlevel>=2 ) int_t Ublocks = 0; #endif int_t gik,iklrow,fnz; int_t *mod_bit = Llu->mod_bit; /* flag contribution from each row block */ int INFO, pad; int_t tmpresult; // #if ( PROFlevel>=1 ) double t1, t2; float msg_vol = 0, msg_cnt = 0; // #endif int_t msgcnt[4]; /* Count the size of the message xfer'd in each buffer: * 0 : transferred in Lsub_buf[] * 1 : transferred in Lval_buf[] * 2 : transferred in Usub_buf[] * 3 : transferred in Uval_buf[] */ int iword = sizeof (int_t); int dword = sizeof (double); int Nwork; int_t procs = grid->nprow * grid->npcol; yes_no_t done; yes_no_t startforward; int nbrow; int_t ik, rel, idx_r, jb, nrbl, irow, pc,iknsupc; int_t lptr1_tmp, idx_i, idx_v,m; int_t ready; static int thread_id; yes_no_t empty; int_t sizelsum,sizertemp,aln_d,aln_i; aln_d = ceil(CACHELINE/(double)dword); aln_i = ceil(CACHELINE/(double)iword); int num_thread = 1; maxsuper = sp_ienv_dist(3); #ifdef _OPENMP #pragma omp threadprivate(thread_id) #endif #ifdef _OPENMP #pragma omp parallel default(shared) { if (omp_get_thread_num () == 0) { num_thread = omp_get_num_threads (); } thread_id = omp_get_thread_num (); } #endif #if ( PRNTlevel>=1 ) if( grid->iam==0 ) { printf("num_thread: %5d\n", num_thread); fflush(stdout); } #endif MPI_Barrier( grid->comm ); t1_sol = SuperLU_timer_(); t = SuperLU_timer_(); /* Test input parameters. */ *info = 0; if ( n < 0 ) *info = -1; else if ( nrhs < 0 ) *info = -9; if ( *info ) { pxerr_dist("PZGSTRS", grid, -*info); return; } /* * Initialization. 
*/ iam = grid->iam; Pc = grid->npcol; Pr = grid->nprow; myrow = MYROW( iam, grid ); mycol = MYCOL( iam, grid ); xsup = Glu_persist->xsup; supno = Glu_persist->supno; nsupers = supno[n-1] + 1; Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; Linv_bc_ptr = Llu->Linv_bc_ptr; Uinv_bc_ptr = Llu->Uinv_bc_ptr; nlb = CEILING( nsupers, Pr ); /* Number of local block rows. */ stat->utime[SOL_COMM] = 0.0; stat->utime[SOL_GEMM] = 0.0; stat->utime[SOL_TRSM] = 0.0; stat->utime[SOL_TOT] = 0.0; #if ( DEBUGlevel>=1 ) CHECK_MALLOC(iam, "Enter pzgstrs()"); #endif stat->ops[SOLVE] = 0.0; Llu->SolveMsgSent = 0; /* Save the count to be altered so it can be used by subsequent call to PDGSTRS. */ if ( !(fmod = intMalloc_dist(nlb*aln_i)) ) ABORT("Malloc fails for fmod[]."); for (i = 0; i < nlb; ++i) fmod[i*aln_i] = Llu->fmod[i]; if ( !(frecv = intCalloc_dist(nlb)) ) ABORT("Calloc fails for frecv[]."); Llu->frecv = frecv; if ( !(leaf_send = intMalloc_dist((CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i)) ) ABORT("Malloc fails for leaf_send[]."); nleaf_send=0; if ( !(root_send = intMalloc_dist((CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i)) ) ABORT("Malloc fails for root_send[]."); nroot_send=0; #ifdef _CRAY ftcs1 = _cptofcd("L", strlen("L")); ftcs2 = _cptofcd("N", strlen("N")); ftcs3 = _cptofcd("U", strlen("U")); #endif /* Obtain ilsum[] and ldalsum for process column 0. */ ilsum = Llu->ilsum; ldalsum = Llu->ldalsum; /* Allocate working storage. 
*/ knsupc = sp_ienv_dist(3); maxrecvsz = knsupc * nrhs + SUPERLU_MAX( XK_H, LSUM_H ); sizelsum = (((size_t)ldalsum)*nrhs + nlb*LSUM_H); sizelsum = ((sizelsum + (aln_d - 1)) / aln_d) * aln_d; #ifdef _OPENMP if ( !(lsum = (doublecomplex*)SUPERLU_MALLOC(sizelsum*num_thread * sizeof(doublecomplex)))) ABORT("Malloc fails for lsum[]."); #pragma omp parallel default(shared) private(ii) { for (ii=0; ii<sizelsum; ii++) lsum[thread_id*sizelsum+ii]=zero; } #else if ( !(lsum = (doublecomplex*)SUPERLU_MALLOC(sizelsum*num_thread * sizeof(doublecomplex)))) ABORT("Malloc fails for lsum[]."); for ( ii=0; ii < sizelsum*num_thread; ii++ ) lsum[ii]=zero; #endif if ( !(x = doublecomplexCalloc_dist(ldalsum * nrhs + nlb * XK_H)) ) ABORT("Calloc fails for x[]."); sizertemp=ldalsum * nrhs; sizertemp = ((sizertemp + (aln_d - 1)) / aln_d) * aln_d; if ( !(rtemp = (doublecomplex*)SUPERLU_MALLOC((sizertemp*num_thread + 1) * sizeof(doublecomplex))) ) ABORT("Malloc fails for rtemp[]."); #ifdef _OPENMP #pragma omp parallel default(shared) private(ii) { for ( ii=0; ii<sizertemp; ii++ ) rtemp[thread_id*sizertemp+ii]=zero; } #else for ( ii=0; ii<sizertemp*num_thread; ii++ ) rtemp[ii]=zero; #endif if ( !(stat_loc = (SuperLUStat_t**) SUPERLU_MALLOC(num_thread*sizeof(SuperLUStat_t*))) ) ABORT("Malloc fails for stat_loc[]."); for ( i=0; i<num_thread; i++) { stat_loc[i] = (SuperLUStat_t*)SUPERLU_MALLOC(sizeof(SuperLUStat_t)); PStatInit(stat_loc[i]); } #if ( DEBUGlevel>=2 ) /* Dump the L factor using matlab triple-let format. */ zDumpLblocks(iam, nsupers, grid, Glu_persist, Llu); #endif /*--------------------------------------------------- * Forward solve Ly = b. *---------------------------------------------------*/ /* Redistribute B into X on the diagonal processes. */ pzReDistribute_B_to_X(B, m_loc, nrhs, ldb, fst_row, ilsum, x, ScalePermstruct, Glu_persist, grid, SOLVEstruct); #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; if ( !iam) printf(".. 
B to X redistribute time\t%8.4f\n", t); fflush(stdout); t = SuperLU_timer_(); #endif /* Set up the headers in lsum[]. */ #ifdef _OPENMP #pragma omp simd lastprivate(krow,lk,il) #endif for (k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); if ( myrow == krow ) { lk = LBi( k, grid ); /* Local block number. */ il = LSUM_BLK( lk ); lsum[il - LSUM_H].r = k;/* Block number prepended in the header.*/ lsum[il - LSUM_H].i = 0; } } /* --------------------------------------------------------- Initialize the async Bcast trees on all processes. --------------------------------------------------------- */ nsupers_j = CEILING( nsupers, grid->npcol ); /* Number of local block columns */ nbtree = 0; for (lk=0;lk<nsupers_j;++lk){ if(LBtree_ptr[lk]!=NULL){ // printf("LBtree_ptr lk %5d\n",lk); if(BcTree_IsRoot(LBtree_ptr[lk],'z')==NO){ nbtree++; if(BcTree_getDestCount(LBtree_ptr[lk],'z')>0)nfrecvx_buf++; } BcTree_allocateRequest(LBtree_ptr[lk],'z'); } } nsupers_i = CEILING( nsupers, grid->nprow ); /* Number of local block rows */ if ( !( leafsups = (int_t*)intCalloc_dist(nsupers_i)) ) ABORT("Calloc fails for leafsups."); nrtree = 0; nleaf=0; nfrecvmod=0; if(procs==1){ for (lk=0;lk<nsupers_i;++lk){ gb = myrow+lk*grid->nprow; /* not sure */ if(gb<nsupers){ if (fmod[lk*aln_i]==0){ leafsups[nleaf]=gb; ++nleaf; } } } }else{ for (lk=0;lk<nsupers_i;++lk){ if(LRtree_ptr[lk]!=NULL){ nrtree++; RdTree_allocateRequest(LRtree_ptr[lk],'z'); frecv[lk] = RdTree_GetDestCount(LRtree_ptr[lk],'z'); nfrecvmod += frecv[lk]; }else{ gb = myrow+lk*grid->nprow; /* not sure */ if(gb<nsupers){ kcol = PCOL( gb, grid ); if(mycol==kcol) { /* Diagonal process */ if (fmod[lk*aln_i]==0){ leafsups[nleaf]=gb; ++nleaf; } } } } } } #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < nlb; ++i) fmod[i*aln_i] += frecv[i]; if ( !(recvbuf_BC_fwd = (doublecomplex*)SUPERLU_MALLOC(maxrecvsz*(nfrecvx+1) * sizeof(doublecomplex))) ) // this needs to be optimized for 1D row mapping ABORT("Malloc fails for recvbuf_BC_fwd[]."); 
nfrecvx_buf=0; log_memory(nlb*aln_i*iword+nlb*iword+(CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i*2.0*iword+ nsupers_i*iword + sizelsum*num_thread * dword*2.0 + (ldalsum * nrhs + nlb * XK_H) *dword*2.0 + (sizertemp*num_thread + 1)*dword*2.0+maxrecvsz*(nfrecvx+1)*dword*2.0, stat); //account for fmod, frecv, leaf_send, root_send, leafsups, recvbuf_BC_fwd , lsum, x, rtemp #if ( DEBUGlevel>=2 ) printf("(%2d) nfrecvx %4d, nfrecvmod %4d, nleaf %4d\n, nbtree %4d\n, nrtree %4d\n", iam, nfrecvx, nfrecvmod, nleaf, nbtree, nrtree); fflush(stdout); #endif #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; if ( !iam) printf(".. Setup L-solve time\t%8.4f\n", t); fflush(stdout); MPI_Barrier( grid->comm ); t = SuperLU_timer_(); #endif #if ( VAMPIR>=1 ) // VT_initialize(); VT_traceon(); #endif #ifdef USE_VTUNE __SSC_MARK(0x111);// start SDE tracing, note uses 2 underscores __itt_resume(); // start VTune, again use 2 underscores #endif /* --------------------------------------------------------- Solve the leaf nodes first by all the diagonal processes. --------------------------------------------------------- */ #if ( DEBUGlevel>=2 ) printf("(%2d) nleaf %4d\n", iam, nleaf); fflush(stdout); #endif #ifdef _OPENMP #pragma omp parallel default (shared) #endif { { if (Llu->inv == 1) { /* Diagonal is inverted. */ #ifdef _OPENMP #pragma omp for firstprivate(nrhs,beta,alpha,x,rtemp,ldalsum) private (ii,k,knsupc,lk,luptr,lsub,nsupr,lusup,t1,t2,Linv,i,lib,rtemp_loc,nleaf_send_tmp) nowait #endif for (jj=0;jj<nleaf;jj++){ k=leafsups[jj]; // #ifdef _OPENMP // #pragma omp task firstprivate (k,nrhs,beta,alpha,x,rtemp,ldalsum) private (ii,knsupc,lk,luptr,lsub,nsupr,lusup,thread_id,t1,t2,Linv,i,lib,rtemp_loc) // #endif { #if ( PROFlevel>=1 ) TIC(t1); #endif rtemp_loc = &rtemp[sizertemp* thread_id]; knsupc = SuperSize( k ); lk = LBi( k, grid ); ii = X_BLK( lk ); lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; Linv = Linv_bc_ptr[lk]; #ifdef _CRAY CGEMM( ftcs2, ftcs2, &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #elif defined (USE_VENDOR_BLAS) zgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc, 1, 1 ); #else zgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #endif #ifdef _OPENMP #pragma omp simd #endif for (i=0 ; i<knsupc*nrhs ; i++){ z_copy(&x[ii+i],&rtemp_loc[i]); } // for (i=0 ; i<knsupc*nrhs ; i++){ // printf("x_l: %f %f\n",x[ii+i].r,x[ii+i].i); // fflush(stdout); // } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += 4 * knsupc * (knsupc - 1) * nrhs + 10 * knsupc * nrhs; /* complex division */ // --nleaf; #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if(LBtree_ptr[lk]!=NULL){ lib = LBi( k, grid ); /* Local block number, row-wise. */ ii = X_BLK( lib ); #ifdef _OPENMP #pragma omp atomic capture #endif nleaf_send_tmp = ++nleaf_send; leaf_send[(nleaf_send_tmp-1)*aln_i] = lk; // BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],'z'); } } } } else { /* Diagonal is not inverted. */ #ifdef _OPENMP #pragma omp for firstprivate (nrhs,beta,alpha,x,rtemp,ldalsum) private (ii,k,knsupc,lk,luptr,lsub,nsupr,lusup,t1,t2,Linv,i,lib,rtemp_loc,nleaf_send_tmp) nowait #endif for (jj=0;jj<nleaf;jj++) { k=leafsups[jj]; { #if ( PROFlevel>=1 ) TIC(t1); #endif rtemp_loc = &rtemp[sizertemp* thread_id]; knsupc = SuperSize( k ); lk = LBi( k, grid ); ii = X_BLK( lk ); lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; #ifdef _CRAY CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #elif defined (USE_VENDOR_BLAS) ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1); #else ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #endif // for (i=0 ; i<knsupc*nrhs ; i++){ // printf("x_l: %f %f\n",x[ii+i].r,x[ii+i].i); // fflush(stdout); // } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += 4 * knsupc * (knsupc - 1) * nrhs + 10 * knsupc * nrhs; /* complex division */ // --nleaf; #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if (LBtree_ptr[lk]!=NULL) { lib = LBi( k, grid ); /* Local block number, row-wise. */ ii = X_BLK( lib ); #ifdef _OPENMP #pragma omp atomic capture #endif nleaf_send_tmp = ++nleaf_send; leaf_send[(nleaf_send_tmp-1)*aln_i] = lk; } } /* end a block */ } /* end for jj ... */ } /* end else ... diagonal is not invedted */ } } jj=0; #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { #ifdef _OPENMP #pragma omp taskloop private (k,ii,lk) num_tasks(num_thread*8) nogroup #endif for (jj=0;jj<nleaf;jj++){ k=leafsups[jj]; { /* Diagonal process */ lk = LBi( k, grid ); ii = X_BLK( lk ); /* * Perform local block modifications: lsum[i] -= L_i,k * X[k] */ zlsum_fmod_inv(lsum, x, &x[ii], rtemp, nrhs, k, fmod, xsup, grid, Llu, stat_loc, leaf_send, &nleaf_send,sizelsum,sizertemp,0,maxsuper,thread_id,num_thread); } // } /* if diagonal process ... */ } /* for k ... */ } } for (i=0;i<nleaf_send;i++){ lk = leaf_send[i*aln_i]; if(lk>=0){ // this is a bcast forwarding gb = mycol+lk*grid->npcol; /* not sure */ lib = LBi( gb, grid ); /* Local block number, row-wise. 
*/ ii = X_BLK( lib ); BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(LBtree_ptr[lk],'z')*nrhs+XK_H,'z'); }else{ // this is a reduce forwarding lk = -lk - 1; il = LSUM_BLK( lk ); RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il - LSUM_H ],RdTree_GetMsgSize(LRtree_ptr[lk],'z')*nrhs+LSUM_H,'z'); } } #ifdef USE_VTUNE __itt_pause(); // stop VTune __SSC_MARK(0x222); // stop SDE tracing #endif /* ----------------------------------------------------------- Compute the internal nodes asynchronously by all processes. ----------------------------------------------------------- */ #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { for ( nfrecv =0; nfrecv<nfrecvx+nfrecvmod;nfrecv++) { /* While not finished. */ thread_id = 0; #if ( PROFlevel>=1 ) TIC(t1); // msgcnt[1] = maxrecvsz; #endif recvbuf0 = &recvbuf_BC_fwd[nfrecvx_buf*maxrecvsz]; /* Receive a message. */ MPI_Recv( recvbuf0, maxrecvsz, SuperLU_MPI_DOUBLE_COMPLEX, MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &status ); // MPI_Irecv(recvbuf0,maxrecvsz,SuperLU_MPI_DOUBLE_COMPLEX,MPI_ANY_SOURCE,MPI_ANY_TAG,grid->comm,&req); // ready=0; // while(ready==0){ // MPI_Test(&req,&ready,&status); // #pragma omp taskyield // } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_COMM] += t2; msg_cnt += 1; msg_vol += maxrecvsz * dword; #endif { k = (*recvbuf0).r; #if ( DEBUGlevel>=2 ) printf("(%2d) Recv'd block %d, tag %2d\n", iam, k, status.MPI_TAG); #endif if(status.MPI_TAG==BC_L){ // --nfrecvx; nfrecvx_buf++; { lk = LBj( k, grid ); /* local block number */ if(BcTree_getDestCount(LBtree_ptr[lk],'z')>0){ BcTree_forwardMessageSimple(LBtree_ptr[lk],recvbuf0,BcTree_GetMsgSize(LBtree_ptr[lk],'z')*nrhs+XK_H,'z'); // nfrecvx_buf++; } /* * Perform local block modifications: lsum[i] -= L_i,k * X[k] */ lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; if ( lsub ) { krow = PROW( k, grid ); if(myrow==krow){ nb = lsub[0] - 1; knsupc = SuperSize( k ); ii = X_BLK( LBi( k, grid ) ); xin = &x[ii]; }else{ nb = lsub[0]; knsupc = SuperSize( k ); xin = &recvbuf0[XK_H] ; } zlsum_fmod_inv_master(lsum, x, xin, rtemp, nrhs, knsupc, k, fmod, nb, xsup, grid, Llu, stat_loc,sizelsum,sizertemp,0,maxsuper,thread_id,num_thread); } /* if lsub */ } }else if(status.MPI_TAG==RD_L){ // --nfrecvmod; lk = LBi( k, grid ); /* Local block number, row-wise. */ knsupc = SuperSize( k ); tempv = &recvbuf0[LSUM_H]; il = LSUM_BLK( lk ); RHS_ITERATE(j) { for (i = 0; i < knsupc; ++i) z_add(&lsum[i + il + j*knsupc + thread_id*sizelsum], &lsum[i + il + j*knsupc + thread_id*sizelsum], &tempv[i + j*knsupc]); } // #ifdef _OPENMP // #pragma omp atomic capture // #endif fmod_tmp=--fmod[lk*aln_i]; { thread_id = 0; rtemp_loc = &rtemp[sizertemp* thread_id]; if ( fmod_tmp==0 ) { if(RdTree_IsRoot(LRtree_ptr[lk],'z')==YES){ // ii = X_BLK( lk ); knsupc = SuperSize( k ); for (ii=1;ii<num_thread;ii++) #ifdef _OPENMP #pragma omp simd #endif for (jj=0;jj<knsupc*nrhs;jj++) z_add(&lsum[il + jj ], &lsum[il + jj ], &lsum[il + jj + ii*sizelsum]); ii = X_BLK( lk ); RHS_ITERATE(j) #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < knsupc; ++i) z_add(&x[i + ii + j*knsupc], &x[i + ii + j*knsupc], &lsum[i + il + j*knsupc] ); // fmod[lk] = -1; /* Do not solve X[k] in the future. */ lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; #if ( PROFlevel>=1 ) TIC(t1); #endif if(Llu->inv == 1){ Linv = Linv_bc_ptr[lk]; #ifdef _CRAY CGEMM( ftcs2, ftcs2, &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #elif defined (USE_VENDOR_BLAS) zgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc, 1, 1 ); #else zgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Linv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #endif #ifdef _OPENMP #pragma omp simd #endif for (i=0 ; i<knsupc*nrhs ; i++){ z_copy(&x[ii+i],&rtemp_loc[i]); } } else{ #ifdef _CRAY CTRSM(ftcs1, ftcs1, ftcs2, ftcs3, &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #elif defined (USE_VENDOR_BLAS) ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1); #else ztrsm_("L", "L", "N", "U", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #endif } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += 4 * knsupc * (knsupc - 1) * nrhs + 10 * knsupc * nrhs; /* complex division */ #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if(LBtree_ptr[lk]!=NULL){ BcTree_forwardMessageSimple(LBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(LBtree_ptr[lk],'z')*nrhs+XK_H,'z'); } /* * Perform local block modifications. */ lk = LBj( k, grid ); /* Local block number, column-wise. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; if ( lsub ) { krow = PROW( k, grid ); nb = lsub[0] - 1; knsupc = SuperSize( k ); ii = X_BLK( LBi( k, grid ) ); xin = &x[ii]; zlsum_fmod_inv_master(lsum, x, xin, rtemp, nrhs, knsupc, k, fmod, nb, xsup, grid, Llu, stat_loc,sizelsum,sizertemp,0,maxsuper,thread_id,num_thread); } /* if lsub */ // } }else{ il = LSUM_BLK( lk ); knsupc = SuperSize( k ); for (ii=1;ii<num_thread;ii++) #ifdef _OPENMP #pragma omp simd #endif for (jj=0;jj<knsupc*nrhs;jj++) z_add(&lsum[il + jj ], &lsum[il + jj ], &lsum[il + jj + ii*sizelsum]); RdTree_forwardMessageSimple(LRtree_ptr[lk],&lsum[il-LSUM_H],RdTree_GetMsgSize(LRtree_ptr[lk],'z')*nrhs+LSUM_H,'z'); } } } } /* check Tag */ } } /* while not finished ... */ } } #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; stat->utime[SOL_TOT] += t; if ( !iam ) { printf(".. L-solve time\t%8.4f\n", t); fflush(stdout); } MPI_Reduce (&t, &tmax, 1, MPI_DOUBLE, MPI_MAX, 0, grid->comm); if ( !iam ) { printf(".. L-solve time (MAX) \t%8.4f\n", tmax); fflush(stdout); } t = SuperLU_timer_(); #endif #if ( DEBUGlevel==2 ) { printf("(%d) .. 
After L-solve: y =\n", iam); for (i = 0, k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); kcol = PCOL( k, grid ); if ( myrow == krow && mycol == kcol ) { /* Diagonal process */ knsupc = SuperSize( k ); lk = LBi( k, grid ); ii = X_BLK( lk ); for (j = 0; j < knsupc; ++j) printf("\t(%d)\t%4d\t%.10f\n", iam, xsup[k]+j, x[ii+j]); fflush(stdout); } MPI_Barrier( grid->comm ); } } #endif SUPERLU_FREE(fmod); SUPERLU_FREE(frecv); SUPERLU_FREE(leaf_send); SUPERLU_FREE(leafsups); SUPERLU_FREE(recvbuf_BC_fwd); log_memory(-nlb*aln_i*iword-nlb*iword-(CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i*iword- nsupers_i*iword -maxrecvsz*(nfrecvx+1)*dword*2.0, stat); //account for fmod, frecv, leaf_send, leafsups, recvbuf_BC_fwd for (lk=0;lk<nsupers_j;++lk){ if(LBtree_ptr[lk]!=NULL){ // if(BcTree_IsRoot(LBtree_ptr[lk],'z')==YES){ BcTree_waitSendRequest(LBtree_ptr[lk],'z'); // } // deallocate requests here } } for (lk=0;lk<nsupers_i;++lk){ if(LRtree_ptr[lk]!=NULL){ RdTree_waitSendRequest(LRtree_ptr[lk],'z'); // deallocate requests here } } MPI_Barrier( grid->comm ); #if ( VAMPIR>=1 ) VT_traceoff(); VT_finalize(); #endif /*--------------------------------------------------- * Back solve Ux = y. * * The Y components from the forward solve is already * on the diagonal processes. *---------------------------------------------------*/ /* Save the count to be altered so it can be used by subsequent call to PDGSTRS. */ if ( !(bmod = intMalloc_dist(nlb*aln_i)) ) ABORT("Malloc fails for bmod[]."); for (i = 0; i < nlb; ++i) bmod[i*aln_i] = Llu->bmod[i]; if ( !(brecv = intCalloc_dist(nlb)) ) ABORT("Calloc fails for brecv[]."); Llu->brecv = brecv; k = SUPERLU_MAX( Llu->nfsendx, Llu->nbsendx ) + nlb; /* Re-initialize lsum to zero. Each block header is already in place. */ #ifdef _OPENMP #pragma omp parallel default(shared) private(ii) { for(ii=0;ii<sizelsum;ii++) lsum[thread_id*sizelsum+ii]=zero; } /* Set up the headers in lsum[]. 
*/ #ifdef _OPENMP #pragma omp simd lastprivate(krow,lk,il) #endif for (k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); if ( myrow == krow ) { lk = LBi( k, grid ); /* Local block number. */ il = LSUM_BLK( lk ); lsum[il - LSUM_H].r = k;/* Block number prepended in the header.*/ lsum[il - LSUM_H].i = 0; } } #else for (k = 0; k < nsupers; ++k) { krow = PROW( k, grid ); if ( myrow == krow ) { knsupc = SuperSize( k ); lk = LBi( k, grid ); il = LSUM_BLK( lk ); dest = &lsum[il]; for (jj = 0; jj < num_thread; ++jj) { RHS_ITERATE(j) { for (i = 0; i < knsupc; ++i) dest[i + j*knsupc + jj*sizelsum] = zero; } } } } #endif #if ( DEBUGlevel>=2 ) for (p = 0; p < Pr*Pc; ++p) { if (iam == p) { printf("(%2d) .. Ublocks %d\n", iam, Ublocks); for (lb = 0; lb < nub; ++lb) { printf("(%2d) Local col %2d: # row blocks %2d\n", iam, lb, Urbs[lb]); if ( Urbs[lb] ) { for (i = 0; i < Urbs[lb]; ++i) printf("(%2d) .. row blk %2d:\ lbnum %d, indpos %d, valpos %d\n", iam, i, Ucb_indptr[lb][i].lbnum, Ucb_indptr[lb][i].indpos, Ucb_valptr[lb][i]); } } } MPI_Barrier( grid->comm ); } for (p = 0; p < Pr*Pc; ++p) { if ( iam == p ) { printf("\n(%d) bsendx_plist[][]", iam); for (lb = 0; lb < nub; ++lb) { printf("\n(%d) .. local col %2d: ", iam, lb); for (i = 0; i < Pr; ++i) printf("%4d", bsendx_plist[lb][i]); } printf("\n"); } MPI_Barrier( grid->comm ); } #endif /* DEBUGlevel */ /* --------------------------------------------------------- Initialize the async Bcast trees on all processes. 
--------------------------------------------------------- */ nsupers_j = CEILING( nsupers, grid->npcol ); /* Number of local block columns */ nbtree = 0; for (lk=0;lk<nsupers_j;++lk){ if(UBtree_ptr[lk]!=NULL){ // printf("UBtree_ptr lk %5d\n",lk); if(BcTree_IsRoot(UBtree_ptr[lk],'z')==NO){ nbtree++; if(BcTree_getDestCount(UBtree_ptr[lk],'z')>0)nbrecvx_buf++; } BcTree_allocateRequest(UBtree_ptr[lk],'z'); } } nsupers_i = CEILING( nsupers, grid->nprow ); /* Number of local block rows */ if ( !( rootsups = (int_t*)intCalloc_dist(nsupers_i)) ) ABORT("Calloc fails for rootsups."); nrtree = 0; nroot=0; for (lk=0;lk<nsupers_i;++lk){ if(URtree_ptr[lk]!=NULL){ // printf("here lk %5d myid %5d\n",lk,iam); // fflush(stdout); nrtree++; RdTree_allocateRequest(URtree_ptr[lk],'z'); brecv[lk] = RdTree_GetDestCount(URtree_ptr[lk],'z'); nbrecvmod += brecv[lk]; }else{ gb = myrow+lk*grid->nprow; /* not sure */ if(gb<nsupers){ kcol = PCOL( gb, grid ); if(mycol==kcol) { /* Diagonal process */ if (bmod[lk*aln_i]==0){ rootsups[nroot]=gb; ++nroot; } } } } } #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < nlb; ++i) bmod[i*aln_i] += brecv[i]; // for (i = 0; i < nlb; ++i)printf("bmod[i]: %5d\n",bmod[i]); if ( !(recvbuf_BC_fwd = (doublecomplex*)SUPERLU_MALLOC(maxrecvsz*(nbrecvx+1) * sizeof(doublecomplex))) ) // this needs to be optimized for 1D row mapping ABORT("Malloc fails for recvbuf_BC_fwd[]."); nbrecvx_buf=0; log_memory(nlb*aln_i*iword+nlb*iword + nsupers_i*iword + maxrecvsz*(nbrecvx+1)*dword*2.0, stat); //account for bmod, brecv, rootsups, recvbuf_BC_fwd #if ( DEBUGlevel>=2 ) printf("(%2d) nbrecvx %4d, nbrecvmod %4d, nroot %4d\n, nbtree %4d\n, nrtree %4d\n", iam, nbrecvx, nbrecvmod, nroot, nbtree, nrtree); fflush(stdout); #endif #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; if ( !iam) printf(".. Setup U-solve time\t%8.4f\n", t); fflush(stdout); MPI_Barrier( grid->comm ); t = SuperLU_timer_(); #endif /* * Solve the roots first by all the diagonal processes. 
*/ #if ( DEBUGlevel>=2 ) printf("(%2d) nroot %4d\n", iam, nroot); fflush(stdout); #endif #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { #ifdef _OPENMP #pragma omp taskloop firstprivate (nrhs,beta,alpha,x,rtemp,ldalsum) private (ii,jj,k,knsupc,lk,luptr,lsub,nsupr,lusup,t1,t2,Uinv,i,lib,rtemp_loc,nroot_send_tmp) nogroup #endif for (jj=0;jj<nroot;jj++){ k=rootsups[jj]; #if ( PROFlevel>=1 ) TIC(t1); #endif rtemp_loc = &rtemp[sizertemp* thread_id]; knsupc = SuperSize( k ); lk = LBi( k, grid ); /* Local block number, row-wise. */ // bmod[lk] = -1; /* Do not solve X[k] in the future. */ ii = X_BLK( lk ); lk = LBj( k, grid ); /* Local block number, column-wise */ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; if(Llu->inv == 1){ Uinv = Uinv_bc_ptr[lk]; #ifdef _CRAY CGEMM( ftcs2, ftcs2, &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #elif defined (USE_VENDOR_BLAS) zgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc, 1, 1 ); #else zgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #endif #ifdef _OPENMP #pragma omp simd #endif for (i=0 ; i<knsupc*nrhs ; i++){ z_copy(&x[ii+i],&rtemp_loc[i]); } }else{ #ifdef _CRAY CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #elif defined (USE_VENDOR_BLAS) ztrsm_("L", "U", "N", "N", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1); #else ztrsm_("L", "U", "N", "N", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #endif } // for (i=0 ; i<knsupc*nrhs ; i++){ // printf("x_u: %f %f\n",x[ii+i].r,x[ii+i].i); // fflush(stdout); // } // for (i=0 ; i<knsupc*nrhs ; i++){ // printf("x: %f\n",x[ii+i]); // fflush(stdout); // } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif stat_loc[thread_id]->ops[SOLVE] += 
4 * knsupc * (knsupc + 1) * nrhs + 10 * knsupc * nrhs; /* complex division */ #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if(UBtree_ptr[lk]!=NULL){ #ifdef _OPENMP #pragma omp atomic capture #endif nroot_send_tmp = ++nroot_send; root_send[(nroot_send_tmp-1)*aln_i] = lk; } } /* for k ... */ } } #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif { #ifdef _OPENMP #pragma omp taskloop private (ii,jj,k,lk) nogroup #endif for (jj=0;jj<nroot;jj++){ k=rootsups[jj]; lk = LBi( k, grid ); /* Local block number, row-wise. */ ii = X_BLK( lk ); lk = LBj( k, grid ); /* Local block number, column-wise */ /* * Perform local block modifications: lsum[i] -= U_i,k * X[k] */ if ( Urbs[lk] ) zlsum_bmod_inv(lsum, x, &x[ii], rtemp, nrhs, k, bmod, Urbs, Ucb_indptr, Ucb_valptr, xsup, grid, Llu, stat_loc, root_send, &nroot_send, sizelsum,sizertemp,thread_id,num_thread); } /* for k ... */ } } for (i=0;i<nroot_send;i++){ lk = root_send[(i)*aln_i]; if(lk>=0){ // this is a bcast forwarding gb = mycol+lk*grid->npcol; /* not sure */ lib = LBi( gb, grid ); /* Local block number, row-wise. */ ii = X_BLK( lib ); BcTree_forwardMessageSimple(UBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(UBtree_ptr[lk],'z')*nrhs+XK_H,'z'); }else{ // this is a reduce forwarding lk = -lk - 1; il = LSUM_BLK( lk ); RdTree_forwardMessageSimple(URtree_ptr[lk],&lsum[il - LSUM_H ],RdTree_GetMsgSize(URtree_ptr[lk],'z')*nrhs+LSUM_H,'z'); } } /* * Compute the internal nodes asychronously by all processes. */ #ifdef _OPENMP #pragma omp parallel default (shared) #endif { #ifdef _OPENMP #pragma omp master #endif for ( nbrecv =0; nbrecv<nbrecvx+nbrecvmod;nbrecv++) { /* While not finished. 
*/ // printf("iam %4d nbrecv %4d nbrecvx %4d nbrecvmod %4d\n", iam, nbrecv, nbrecvxnbrecvmod); // fflush(stdout); thread_id = 0; #if ( PROFlevel>=1 ) TIC(t1); #endif recvbuf0 = &recvbuf_BC_fwd[nbrecvx_buf*maxrecvsz]; /* Receive a message. */ MPI_Recv( recvbuf0, maxrecvsz, SuperLU_MPI_DOUBLE_COMPLEX, MPI_ANY_SOURCE, MPI_ANY_TAG, grid->comm, &status ); #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_COMM] += t2; msg_cnt += 1; msg_vol += maxrecvsz * dword; #endif k = (*recvbuf0).r; #if ( DEBUGlevel>=2 ) printf("(%2d) Recv'd block %d, tag %2d\n", iam, k, status.MPI_TAG); fflush(stdout); #endif if(status.MPI_TAG==BC_U){ // --nfrecvx; nbrecvx_buf++; lk = LBj( k, grid ); /* local block number */ if(BcTree_getDestCount(UBtree_ptr[lk],'z')>0){ BcTree_forwardMessageSimple(UBtree_ptr[lk],recvbuf0,BcTree_GetMsgSize(UBtree_ptr[lk],'z')*nrhs+XK_H,'z'); // nfrecvx_buf++; } /* * Perform local block modifications: lsum[i] -= L_i,k * X[k] */ lk = LBj( k, grid ); /* Local block number, column-wise. */ zlsum_bmod_inv_master(lsum, x, &recvbuf0[XK_H], rtemp, nrhs, k, bmod, Urbs, Ucb_indptr, Ucb_valptr, xsup, grid, Llu, stat_loc, sizelsum,sizertemp,thread_id,num_thread); }else if(status.MPI_TAG==RD_U){ lk = LBi( k, grid ); /* Local block number, row-wise. 
*/ knsupc = SuperSize( k ); tempv = &recvbuf0[LSUM_H]; il = LSUM_BLK( lk ); RHS_ITERATE(j) { #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < knsupc; ++i) z_add(&lsum[i + il + j*knsupc + thread_id*sizelsum], &lsum[i + il + j*knsupc + thread_id*sizelsum], &tempv[i + j*knsupc]); } // #ifdef _OPENMP // #pragma omp atomic capture // #endif bmod_tmp=--bmod[lk*aln_i]; thread_id = 0; rtemp_loc = &rtemp[sizertemp* thread_id]; if ( bmod_tmp==0 ) { if(RdTree_IsRoot(URtree_ptr[lk],'z')==YES){ knsupc = SuperSize( k ); for (ii=1;ii<num_thread;ii++) #ifdef _OPENMP #pragma omp simd #endif for (jj=0;jj<knsupc*nrhs;jj++) z_add(&lsum[il+ jj ], &lsum[il+ jj ], &lsum[il + jj + ii*sizelsum]); ii = X_BLK( lk ); RHS_ITERATE(j) #ifdef _OPENMP #pragma omp simd #endif for (i = 0; i < knsupc; ++i) z_add(&x[i + ii + j*knsupc], &x[i + ii + j*knsupc], &lsum[i + il + j*knsupc] ); lk = LBj( k, grid ); /* Local block number, column-wise. */ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; nsupr = lsub[1]; if(Llu->inv == 1){ Uinv = Uinv_bc_ptr[lk]; #ifdef _CRAY CGEMM( ftcs2, ftcs2, &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #elif defined (USE_VENDOR_BLAS) zgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc, 1, 1 ); #else zgemm_( "N", "N", &knsupc, &nrhs, &knsupc, &alpha, Uinv, &knsupc, &x[ii], &knsupc, &beta, rtemp_loc, &knsupc ); #endif #ifdef _OPENMP #pragma omp simd #endif for (i=0 ; i<knsupc*nrhs ; i++){ z_copy(&x[ii+i],&rtemp_loc[i]); } }else{ #ifdef _CRAY CTRSM(ftcs1, ftcs3, ftcs2, ftcs2, &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #elif defined (USE_VENDOR_BLAS) ztrsm_("L", "U", "N", "N", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc, 1, 1, 1, 1); #else ztrsm_("L", "U", "N", "N", &knsupc, &nrhs, &alpha, lusup, &nsupr, &x[ii], &knsupc); #endif } #if ( PROFlevel>=1 ) TOC(t2, t1); stat_loc[thread_id]->utime[SOL_TRSM] += t2; #endif 
stat_loc[thread_id]->ops[SOLVE] += 4 * knsupc * (knsupc + 1) * nrhs + 10 * knsupc * nrhs; /* complex division */ #if ( DEBUGlevel>=2 ) printf("(%2d) Solve X[%2d]\n", iam, k); #endif /* * Send Xk to process column Pc[k]. */ if(UBtree_ptr[lk]!=NULL){ BcTree_forwardMessageSimple(UBtree_ptr[lk],&x[ii - XK_H],BcTree_GetMsgSize(UBtree_ptr[lk],'z')*nrhs+XK_H,'z'); } /* * Perform local block modifications: * lsum[i] -= U_i,k * X[k] */ if ( Urbs[lk] ) zlsum_bmod_inv_master(lsum, x, &x[ii], rtemp, nrhs, k, bmod, Urbs, Ucb_indptr, Ucb_valptr, xsup, grid, Llu, stat_loc, sizelsum,sizertemp,thread_id,num_thread); }else{ il = LSUM_BLK( lk ); knsupc = SuperSize( k ); for (ii=1;ii<num_thread;ii++) #ifdef _OPENMP #pragma omp simd #endif for (jj=0;jj<knsupc*nrhs;jj++) z_add(&lsum[il+ jj ], &lsum[il+ jj ], &lsum[il + jj + ii*sizelsum]); RdTree_forwardMessageSimple(URtree_ptr[lk],&lsum[il-LSUM_H],RdTree_GetMsgSize(URtree_ptr[lk],'z')*nrhs+LSUM_H,'z'); } } } } /* while not finished ... */ } #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; stat->utime[SOL_TOT] += t; if ( !iam ) printf(".. U-solve time\t%8.4f\n", t); MPI_Reduce (&t, &tmax, 1, MPI_DOUBLE, MPI_MAX, 0, grid->comm); if ( !iam ) { printf(".. U-solve time (MAX) \t%8.4f\n", tmax); fflush(stdout); } t = SuperLU_timer_(); #endif #if ( DEBUGlevel>=2 ) { double *x_col; int diag; printf("\n(%d) .. After U-solve: x (ON DIAG PROCS) = \n", iam); ii = 0; for (k = 0; k < nsupers; ++k) { knsupc = SuperSize( k ); krow = PROW( k, grid ); kcol = PCOL( k, grid ); diag = PNUM( krow, kcol, grid); if ( iam == diag ) { /* Diagonal process. */ lk = LBi( k, grid ); jj = X_BLK( lk ); x_col = &x[jj]; RHS_ITERATE(j) { for (i = 0; i < knsupc; ++i) { /* X stored in blocks */ printf("\t(%d)\t%4d\t%.10f\n", iam, xsup[k]+i, x_col[i]); } x_col += knsupc; } } ii += knsupc; } /* for k ... 
*/ } #endif pzReDistribute_X_to_B(n, B, m_loc, ldb, fst_row, nrhs, x, ilsum, ScalePermstruct, Glu_persist, grid, SOLVEstruct); #if ( PRNTlevel>=2 ) t = SuperLU_timer_() - t; if ( !iam) printf(".. X to B redistribute time\t%8.4f\n", t); t = SuperLU_timer_(); #endif double tmp1=0; double tmp2=0; double tmp3=0; double tmp4=0; for(i=0;i<num_thread;i++){ tmp1 = SUPERLU_MAX(tmp1,stat_loc[i]->utime[SOL_TRSM]); tmp2 = SUPERLU_MAX(tmp2,stat_loc[i]->utime[SOL_GEMM]); tmp3 = SUPERLU_MAX(tmp3,stat_loc[i]->utime[SOL_COMM]); tmp4 += stat_loc[i]->ops[SOLVE]; #if ( PRNTlevel>=2 ) if(iam==0)printf("thread %5d gemm %9.5f\n",i,stat_loc[i]->utime[SOL_GEMM]); #endif } stat->utime[SOL_TRSM] += tmp1; stat->utime[SOL_GEMM] += tmp2; stat->utime[SOL_COMM] += tmp3; stat->ops[SOLVE]+= tmp4; /* Deallocate storage. */ for(i=0;i<num_thread;i++){ PStatFree(stat_loc[i]); SUPERLU_FREE(stat_loc[i]); } SUPERLU_FREE(stat_loc); SUPERLU_FREE(rtemp); SUPERLU_FREE(lsum); SUPERLU_FREE(x); SUPERLU_FREE(bmod); SUPERLU_FREE(brecv); SUPERLU_FREE(root_send); SUPERLU_FREE(rootsups); SUPERLU_FREE(recvbuf_BC_fwd); log_memory(-nlb*aln_i*iword-nlb*iword - nsupers_i*iword - (CEILING( nsupers, Pr )+CEILING( nsupers, Pc ))*aln_i*iword - maxrecvsz*(nbrecvx+1)*dword*2.0 - sizelsum*num_thread * dword*2.0 - (ldalsum * nrhs + nlb * XK_H) *dword*2.0 - (sizertemp*num_thread + 1)*dword*2.0, stat); //account for bmod, brecv, root_send, rootsups, recvbuf_BC_fwd,rtemp,lsum,x for (lk=0;lk<nsupers_j;++lk){ if(UBtree_ptr[lk]!=NULL){ // if(BcTree_IsRoot(LBtree_ptr[lk],'z')==YES){ BcTree_waitSendRequest(UBtree_ptr[lk],'z'); // } // deallocate requests here } } for (lk=0;lk<nsupers_i;++lk){ if(URtree_ptr[lk]!=NULL){ RdTree_waitSendRequest(URtree_ptr[lk],'z'); // deallocate requests here } } MPI_Barrier( grid->comm ); #if ( PROFlevel>=2 ) { float msg_vol_max, msg_vol_sum, msg_cnt_max, msg_cnt_sum; MPI_Reduce (&msg_cnt, &msg_cnt_sum, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm); MPI_Reduce (&msg_cnt, &msg_cnt_max, 1, MPI_FLOAT, MPI_MAX, 0, 
grid->comm); MPI_Reduce (&msg_vol, &msg_vol_sum, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm); MPI_Reduce (&msg_vol, &msg_vol_max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm); if (!iam) { printf ("\tPDGSTRS comm stat:" "\tAvg\tMax\t\tAvg\tMax\n" "\t\t\tCount:\t%.0f\t%.0f\tVol(MB)\t%.2f\t%.2f\n", msg_cnt_sum / Pr / Pc, msg_cnt_max, msg_vol_sum / Pr / Pc * 1e-6, msg_vol_max * 1e-6); } } #endif stat->utime[SOLVE] = SuperLU_timer_() - t1_sol; #if ( DEBUGlevel>=1 ) CHECK_MALLOC(iam, "Exit pzgstrs()"); #endif #if ( PRNTlevel>=2 ) float for_lu, total, max, avg, temp; superlu_dist_mem_usage_t num_mem_usage; dQuerySpace_dist(n, LUstruct, grid, stat, &num_mem_usage); temp = num_mem_usage.total; MPI_Reduce( &temp, &max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm ); MPI_Reduce( &temp, &avg, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm ); if (!iam) { printf("\n** Memory Usage **********************************\n"); printf("** Total highmark (MB):\n" " Sum-of-all : %8.2f | Avg : %8.2f | Max : %8.2f\n", avg * 1e-6, avg / grid->nprow / grid->npcol * 1e-6, max * 1e-6); printf("**************************************************\n"); fflush(stdout); } #endif return; } /* PZGSTRS */
GB_unop__identity_bool_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__identity_bool_uint32)
// op(A') function: GB (_unop_tran__identity_bool_uint32)

// C type:   bool
// A type:   uint32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

// NOTE: the macros below are consumed by the shared template
// "GB_unop_transpose.c" (included in the tran function); do not rename them.

// type of the A matrix entries
#define GB_ATYPE \
    uint32_t

// type of the C matrix entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// access the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: z = x)
#define GB_OP(z, x) \
    z = x ;

// casting (uint32_t -> bool; nonzero becomes true)
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij): fused load, cast, apply, and store
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: uint32_t is typecast to bool, so no memcpy shortcut)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the unary operator elementwise to the anz entries of Ax, writing
// the result into Cx.  Returns GrB_NO_VALUE when this kernel is compiled
// out (GB_DISABLE), GrB_SUCCESS otherwise.

GrB_Info GB (_unop_apply__identity_bool_uint32)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse case: every slot in Ax holds a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with identical types: a flat memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots not present in the bitmap
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the shared template "GB_unop_transpose.c",
// which is specialized through the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_bool_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace buffers for the transpose
    const int64_t *restrict A_slice,    // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif