source
stringlengths
3
92
c
stringlengths
26
2.25M
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/ASTFwd.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. 
SourceLocation EndLoc; /// Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. 
OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This structure contains most locations needed for by an OMPVarListClause. struct OMPVarListLocTy { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Location of '('. SourceLocation LParenLoc; /// Ending location of the clause. 
SourceLocation EndLoc; OMPVarListLocTy() = default; OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {} }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. 
void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. 
void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); } child_range children() { return child_range(&Allocator, &Allocator + 1); } const_child_range children() const { return const_child_range(&Allocator, &Allocator + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocator; } }; /// This represents clause 'allocate' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// and clause 'allocate' for the variable 'a'. class OMPAllocateClause final : public OMPVarListClause<OMPAllocateClause>, private llvm::TrailingObjects<OMPAllocateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Allocator specified in the clause, or 'nullptr' if the default one is /// used. 
Expr *Allocator = nullptr; /// Position of the ':' delimiter in the clause; SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc, LParenLoc, EndLoc, N), Allocator(Allocator), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPAllocateClause(unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. 
/// \param N The number of variables. static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. 
/// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. 
SourceLocation getNameModifierLoc() const { return NameModifierLoc; } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPIfClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_if; } }; /// This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } public: /// Build 'final' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPFinalClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPFinalClause() : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPFinalClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// Build an empty clause. OMPNumThreadsClause() : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } const_child_range children() const { return const_child_range(&NumThreads, &NumThreads + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_threads; } }; /// This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Safelen = nullptr; /// Set safelen. 
void setSafelen(Expr *Len) { Safelen = Len; } public: /// Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// Build an empty clause. explicit OMPSafelenClause() : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } child_range children() { return child_range(&Safelen, &Safelen + 1); } const_child_range children() const { return const_child_range(&Safelen, &Safelen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_safelen; } }; /// This represents 'simdlen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd simdlen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'simdlen' /// with single expression '4'. /// If the 'simdlen' clause is used then it specifies the preferred number of /// iterations to be executed concurrently. The parameter of the 'simdlen' /// clause must be a constant positive integer expression. class OMPSimdlenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Simdlen = nullptr; /// Set simdlen. 
void setSimdlen(Expr *Len) { Simdlen = Len; } public: /// Build 'simdlen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc), Simdlen(Len) {} /// Build an empty clause. explicit OMPSimdlenClause() : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); } child_range children() { return child_range(&Simdlen, &Simdlen + 1); } const_child_range children() const { return const_child_range(&Simdlen, &Simdlen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simdlen; } }; /// This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Set the number of associated for-loops. 
void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. 
/// /// \param K Argument of clause. void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::DefaultKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. 
/// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPProcBindClause() : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::ProcBindKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. 
SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. 
/// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedSharedMemoryClause() : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. 
  OMPReverseOffloadClause()
      : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(),
                  SourceLocation()) {}

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reverse_offload;
  }
};

/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'dynamic_allocators' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDynamicAllocatorsClause()
      : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(),
                  SourceLocation()) {}

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators;
  }
};

/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// 'atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const {
    return KindKwLoc;
  }

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set the next unset schedule modifier: fills the first slot if it is
  /// still unknown, otherwise the second (which must still be unknown).
  ///
  /// \param M Schedule modifier.
  // NOTE(review): the name is misspelled ('Modifer' for 'Modifier'); it is
  // kept as-is because renaming would break existing callers.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
/// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. /// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. /// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. 
  SourceLocation getScheduleKindLoc() { return KindLoc; }

  /// Get the first modifier location.
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }

  /// Get the second modifier location.
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  // The only child is the chunk-size expression (may be null).
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_schedule;
  }
};

/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Real number of loops.
  unsigned NumberOfLoops = 0;

  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {}

  /// Build an empty clause.
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                  unsigned NumLoops, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);

  /// Build an empty clause.
  static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  /// Set number of iterations for the specified loop.
  void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
  /// Get number of iterations for all the loops.
  ArrayRef<Expr *> getLoopNumIterations() const;

  /// Set loop counter for the specified loop.
  void setLoopCounter(unsigned NumLoop, Expr *Counter);
  /// Get loop counter for the specified loop.
  Expr *getLoopCounter(unsigned NumLoop);
  const Expr *getLoopCounter(unsigned NumLoop) const;

  // The only child is the optional number-of-loops expression.
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_ordered;
  }
};

/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nowait;
  }
};

/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
/// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {} /// Build an empty clause. OMPUntiedClause() : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_untied; } }; /// This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. class OMPMergeableClause : public OMPClause { public: /// Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {} /// Build an empty clause. 
  OMPMergeableClause()
      : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(),
                  SourceLocation()) {}

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_mergeable;
  }
};

/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReadClause()
      : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_read;
  }
};

/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
/// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {} /// Build an empty clause. OMPWriteClause() : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_write; } }; /// This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// Also, this class represents 'update' clause in '#pragma omp depobj' /// directive. /// /// \code /// #pragma omp depobj(a) update(in) /// \endcode /// In this example directive '#pragma omp depobj' has 'update' clause with 'in' /// dependence kind. class OMPUpdateClause final : public OMPClause, private llvm::TrailingObjects<OMPUpdateClause, SourceLocation, OpenMPDependClauseKind> { friend class OMPClauseReader; friend TrailingObjects; /// true if extended version of the clause for 'depobj' directive. bool IsExtended = false; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<SourceLocation>) const { // 2 locations: for '(' and argument location. return IsExtended ? 2 : 0; } /// Sets the the location of '(' in clause for 'depobj' directive. 
  void setLParenLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<SourceLocation>() = Loc;
  }

  /// Sets the location of the argument in clause for 'depobj' directive.
  void setArgumentLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
  }

  /// Sets the dependence kind for the clause for 'depobj' directive.
  void setDependencyKind(OpenMPDependClauseKind DK) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<OpenMPDependClauseKind>() = DK;
  }

  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
                  bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
        IsExtended(IsExtended) {}

  /// Build an empty clause.
  OMPUpdateClause(bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
        IsExtended(IsExtended) {}

public:
  /// Creates clause for 'atomic' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc);

  /// Creates clause for 'depobj' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ArgumentLoc Location of the argument.
  /// \param DK Dependence kind.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation ArgumentLoc,
                                 OpenMPDependClauseKind DK,
                                 SourceLocation EndLoc);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param IsExtended true if extended clause for 'depobj' directive must be
  /// created.
  static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);

  /// Checks if the clause is the extended clauses for 'depobj' directive.
  bool isExtended() const { return IsExtended; }

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Gets the location of '(' in clause for 'depobj' directive.
  SourceLocation getLParenLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<SourceLocation>();
  }

  /// Gets the location of argument in clause for 'depobj' directive.
  SourceLocation getArgumentLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *std::next(getTrailingObjects<SourceLocation>(), 1);
  }

  /// Gets the dependence kind in clause for 'depobj' directive.
  OpenMPDependClauseKind getDependencyKind() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<OpenMPDependClauseKind>();
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPCaptureClause()
      : OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) {
  }

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_capture;
  }
};

/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) {
  }

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
  }
};

/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build 'acq_rel' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcqRelClause()
      : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) {
  }

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
  }
};

/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build 'acquire' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcquireClause()
      : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) {
  }

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acquire;
  }
};

/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
  /// Build 'release' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReleaseClause()
      : OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) {
  }

  // Clause has no associated expressions; all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_release;
  }
};

/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
  /// Build 'relaxed' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPRelaxedClause()
      : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) {
  }

  // This clause is a bare keyword and carries no expressions, so all child
  // ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_relaxed;
  }
};

/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  // Trailing storage holds 2*N expressions: the N variables of the var-list
  // followed by N references to the private copies (see getPrivateCopies(),
  // which starts at varlist_end()).
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
                                           LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Note: unlike 'firstprivate', the original variables' values are not read
  // in this clause, so the used-children set is empty.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  // Trailing storage holds 3*N expressions: the N variables of the var-list,
  // N private copies (getPrivateCopies()), and N initializer variables
  // (getInits()), laid out consecutively.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate,
                                                StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new
  /// private variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // The whole var-list is reported as used: the original variables are read
  // to initialize the private copies.
  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
  }
};

/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
  SourceLocation LPKindLoc;
  /// Optional colon location, if specified by user.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
                       SourceLocation LPKindLoc, SourceLocation ColonLoc,
                       unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

  /// Sets lastprivate kind.
  void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; }
  /// Sets location of the lastprivate kind.
  void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; }
  /// Sets colon symbol location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc,
         SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Lastprivate kind.
  OpenMPLastprivateModifier getKind() const { return LPKind; }
  /// Returns the location of the lastprivate kind.
  SourceLocation getKindLoc() const { return LPKindLoc; }
  /// Returns the location of the ':' symbol, if any.
  SourceLocation getColonLoc() const { return ColonLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }

  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPLastprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_lastprivate;
  }
};

/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPSharedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_shared; } }; /// This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. class OMPReductionClause final : public OMPVarListClause<OMPReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Reduction modifier. OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown; /// Reduction modifier location. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. 
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets reduction modifier. void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; } /// Sets location of the modifier. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private copy of the reduction /// variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. 
void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper copy operations for inscan reductions. /// The form is: Temps[i] = LHS[i]; void setInscanCopyOps(ArrayRef<Expr *> Ops); /// Get the list of helper inscan copy operations. 
MutableArrayRef<Expr *> getInscanCopyOps() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyOps() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } /// Set list of helper temp vars for inscan copy array operations. void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps); /// Get the list of helper inscan copy temps. MutableArrayRef<Expr *> getInscanCopyArrayTemps() { return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyArrayTemps() const { return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size()); } /// Set list of helper temp elements vars for inscan copy array operations. void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems); /// Get the list of helper inscan copy temps. MutableArrayRef<Expr *> getInscanCopyArrayElems() { return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyArrayElems() const { return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. 
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param CopyOps List of copy operations for inscan reductions:
  /// \code
  /// TempExprs = LHSExprs;
  /// \endcode
  /// \param CopyArrayTemps Temp arrays for prefix sums.
  /// \param CopyArrayElems Temp array elements for prefix sums.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ModifierLoc, SourceLocation ColonLoc,
         SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier,
         ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps,
         ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param Modifier Reduction modifier.
  static OMPReductionClause *
  CreateEmpty(const ASTContext &C, unsigned N,
              OpenMPReductionClauseModifier Modifier);

  /// Returns modifier.
  OpenMPReductionClauseModifier getModifier() const { return Modifier; }

  /// Returns modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  helper_expr_const_range copy_ops() const {
    return helper_expr_const_range(getInscanCopyOps().begin(),
                                   getInscanCopyOps().end());
  }

  helper_expr_range copy_ops() {
    return helper_expr_range(getInscanCopyOps().begin(),
                             getInscanCopyOps().end());
  }

  helper_expr_const_range copy_array_temps() const {
    return helper_expr_const_range(getInscanCopyArrayTemps().begin(),
                                   getInscanCopyArrayTemps().end());
  }

  helper_expr_range copy_array_temps() {
    return helper_expr_range(getInscanCopyArrayTemps().begin(),
                             getInscanCopyArrayTemps().end());
  }

  helper_expr_const_range copy_array_elems() const {
    return helper_expr_const_range(getInscanCopyArrayElems().begin(),
                                   getInscanCopyArrayElems().end());
  }

  helper_expr_range copy_array_elems() {
    return helper_expr_range(getInscanCopyArrayElems().begin(),
                             getInscanCopyArrayElems().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Unlike task_reduction/in_reduction, all reduction variables are used by
  // the enclosing region, so used_children() covers the whole varlist.
  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reduction;
  }
};

/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  /// Helper arrays are tail-allocated after the varlist, each of
  /// varlist_size() elements: { Privates; LHSExprs; RHSExprs; ReductionOps; }.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper RHS expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Task reduction variables are not directly used by the taskgroup region
  // itself, so the used-children range is empty.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_task_reduction;
  }
};

/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  /// Helper arrays are tail-allocated after the varlist, each of
  /// varlist_size() elements: { Privates; LHSExprs; RHSExprs; ReductionOps;
  /// TaskgroupDescriptors; }.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper RHS expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for the reduction clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps,
         ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit,
         Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }

  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // In-reduction variables are not directly used by the task region itself,
  // so the used-children range is empty.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_in_reduction;
  }
};

/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

  /// Location of linear modifier if any.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the linear step for clause.
  /// The step is stored in the first helper slot after the Finals array.
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// Sets the expression to calculate linear step for clause.
  /// Stored in the second helper slot after the Finals array.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
                                          LParenLoc, EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), NumVars),
        OMPClauseWithPostUpdate(this) {}

  /// Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed by
  /// NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Gets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// Gets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// Gets the list of used expressions for linear variables.
  /// Skips the Step and CalcStep slots (hence +2), and the list holds one
  /// extra expression (hence varlist_size() + 1).
  MutableArrayRef<Expr *> getUsedExprs() {
    return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
  }
  ArrayRef<const Expr *> getUsedExprs() const {
    return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
  }

  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

  /// Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }

  /// Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }

  /// Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }

  /// Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  /// Sets the list of used expressions for the linear clause.
void setUsedExprs(ArrayRef<Expr *> UE); using privates_iterator = MutableArrayRef<Expr *>::iterator; using privates_const_iterator = ArrayRef<const Expr *>::iterator; using privates_range = llvm::iterator_range<privates_iterator>; using privates_const_range = llvm::iterator_range<privates_const_iterator>; privates_range privates() { return privates_range(getPrivates().begin(), getPrivates().end()); } privates_const_range privates() const { return privates_const_range(getPrivates().begin(), getPrivates().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } using updates_iterator = MutableArrayRef<Expr *>::iterator; using updates_const_iterator = ArrayRef<const Expr *>::iterator; using updates_range = llvm::iterator_range<updates_iterator>; using updates_const_range = llvm::iterator_range<updates_const_iterator>; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } using finals_iterator = MutableArrayRef<Expr *>::iterator; using finals_const_iterator = ArrayRef<const Expr *>::iterator; using finals_range = llvm::iterator_range<finals_iterator>; using finals_const_range = llvm::iterator_range<finals_const_iterator>; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } using used_expressions_iterator = MutableArrayRef<Expr *>::iterator; using used_expressions_const_iterator = ArrayRef<const Expr 
*>::iterator;
  using used_expressions_range = llvm::iterator_range<used_expressions_iterator>;
  using used_expressions_const_range =
      llvm::iterator_range<used_expressions_const_iterator>;

  // NOTE(review): these return finals_range/finals_const_range; presumably
  // those aliases name the same iterator_range instantiations as
  // used_expressions_range/used_expressions_const_range -- confirm against the
  // typedefs earlier in this class.
  used_expressions_range used_expressions() {
    return finals_range(getUsedExprs().begin(), getUsedExprs().end());
  }
  used_expressions_const_range used_expressions() const {
    return finals_const_range(getUsedExprs().begin(), getUsedExprs().end());
  }

  // Children of the clause are the expressions in its variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPLinearClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_linear;
  }
};

/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the alignment for the clause. The alignment expression occupies the
  /// single tail-allocated slot immediately past the variable list.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc,
                                           LParenLoc, EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), NumVars) {}

public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns alignment (stored in the slot right past the variable list).
  Expr *getAlignment() { return *varlist_end(); }

  /// Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAlignedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_aligned;
  }
};

/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  /// The three helper arrays (sources, destinations, assignment ops) are
  /// tail-allocated back to back, starting right past the variable list; each
  /// has varlist_size() elements.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions (stored immediately after
  /// the source expressions).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions (stored immediately after
  /// the destination expressions).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyinClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyin;
  }
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
                                               StartLoc, LParenLoc, EndLoc,
                                               N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  /// The three helper arrays (sources, destinations, assignment ops) are
  /// tail-allocated back to back, starting right past the variable list; each
  /// has varlist_size() elements.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions (stored immediately after
  /// the source expressions).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions (stored immediately after
  /// the destination expressions).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive.
This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
                                         LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
                                         SourceLocation(), SourceLocation(),
                                         SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_flush;
  }
};

/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// The depobj expression associated with the clause.
  Expr *Depobj = nullptr;

  /// Build clause with the given source locations.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
        LParenLoc(LParenLoc) {}

  /// Build an empty clause.
  ///
  explicit OMPDepobjClause()
      : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(), SourceLocation()) {}

  /// Sets the depobj expression.
  void setDepobj(Expr *E) { Depobj = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Creates clause.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Depobj depobj expression associated with the 'depobj' directive.
  static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *Depobj);

  /// Creates an empty clause.
  ///
  /// \param C AST context.
  static OMPDepobjClause *CreateEmpty(const ASTContext &C);

  /// Returns depobj expression associated with the clause.
  Expr *getDepobj() { return Depobj; }
  const Expr *getDepobj() const { return Depobj; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  // The single depobj expression is the clause's only child.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&Depobj),
                       reinterpret_cast<Stmt **>(&Depobj) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDepobjClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depobj;
  }
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc,
                                          LParenLoc, EndLoc, N),
        NumLoops(NumLoops) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  explicit OMPDependClause(unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N),
        NumLoops(NumLoops) {}

  /// Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// Set dependency type location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Sets optional dependency modifier.
  void setModifier(Expr *DepModifier);

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *DepModifier,
                                 OpenMPDependClauseKind DepKind,
                                 SourceLocation DepLoc, SourceLocation ColonLoc,
                                 ArrayRef<Expr *> VL, unsigned NumLoops);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                      unsigned NumLoops);

  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// Return optional depend modifier.
  Expr *getModifier();
  const Expr *getModifier() const {
    return const_cast<OMPDependClause *>(this)->getModifier();
  }

  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Get number of loops associated with the clause.
  unsigned getNumLoops() const { return NumLoops; }

  /// Set the loop data for the depend clauses with 'sink|source' kind of
  /// dependency.
  void setLoopData(unsigned NumLoop, Expr *Cnt);

  /// Get the loop data.
  Expr *getLoopData(unsigned NumLoop);
  const Expr *getLoopData(unsigned NumLoop) const;

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDependClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depend;
  }
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device clause modifier.
  OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// Device number. Stored as Stmt* but always an Expr (see getDevice()).
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

  /// Sets modifier.
  void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }

  /// Sets the modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

public:
  /// Build 'device' clause.
  ///
  /// \param Modifier Clause modifier.
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper statement registered as this clause's pre-init
  /// statement for \a CaptureRegion.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param ModifierLoc Modifier location.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
                  OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation ModifierLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
        ModifierLoc(ModifierLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  /// NOTE(review): this const overload returns a non-const Expr*; tightening
  /// it to 'const Expr *' would be cleaner but could break existing callers,
  /// so it is left as-is.
  Expr *getDevice() const { return cast<Expr>(Device); }

  /// Gets modifier.
  OpenMPDeviceClauseModifier getModifier() const { return Modifier; }

  /// Gets modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  child_range children() { return child_range(&Device, &Device + 1); }

  const_child_range children() const {
    return const_child_range(&Device, &Device + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_device;
  }
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause()
      : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Pair of the expression and the non-contiguous flag associated with the
    /// component, packed into a PointerIntPair.
    llvm::PointerIntPair<Expr *, 1, bool> AssociatedExpressionNonContiguousPr;

    /// Declaration associated with the component. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
ValueDecl *AssociatedDeclaration = nullptr; public: explicit MappableComponent() = default; explicit MappableComponent(Expr *AssociatedExpression, ValueDecl *AssociatedDeclaration, bool IsNonContiguous) : AssociatedExpressionNonContiguousPr(AssociatedExpression, IsNonContiguous), AssociatedDeclaration( AssociatedDeclaration ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl()) : nullptr) {} Expr *getAssociatedExpression() const { return AssociatedExpressionNonContiguousPr.getPointer(); } bool isNonContiguous() const { return AssociatedExpressionNonContiguousPr.getInt(); } ValueDecl *getAssociatedDeclaration() const { return AssociatedDeclaration; } }; // List of components of an expression. This first one is the whole // expression and the last one is the base expression. using MappableExprComponentList = SmallVector<MappableComponent, 8>; using MappableExprComponentListRef = ArrayRef<MappableComponent>; // List of all component lists associated to the same base declaration. // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have // their component list but the same base declaration 'S'. using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>; using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>; protected: // Return the total number of elements in a list of component lists. static unsigned getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists); // Return the total number of elements in a list of declarations. All // declarations are expected to be canonical. static unsigned getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations); }; /// This structure contains all sizes needed for by an /// OMPMappableExprListClause. struct OMPMappableExprListSizeTy { /// Number of expressions listed. unsigned NumVars; /// Number of unique base declarations. unsigned NumUniqueDeclarations; /// Number of component lists. 
  unsigned NumComponentLists;
  /// Total number of expression components.
  unsigned NumComponents;
  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

  /// Whether this clause is possible to have user-defined mappers associated.
  /// It should be true for map, to, and from clauses, and false for
  /// use_device_ptr and is_device_ptr.
  const bool SupportsMapper;

  /// C++ nested name specifier for the associated user-defined mapper.
  NestedNameSpecifierLoc MapperQualifierLoc;

  /// The associated user-defined mapper identifier information.
  DeclarationNameInfo MapperIdInfo;

protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  /// \param SupportsMapper Indicates whether this clause is possible to have
  /// user-defined mappers associated.
  /// \param MapperQualifierLocPtr C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
  OMPMappableExprListClause(
      OpenMPClauseKind K, const OMPVarListLocTy &Locs,
      const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false,
      NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
      DeclarationNameInfo *MapperIdInfoPtr = nullptr)
      : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
                            Sizes.NumVars),
        NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
        NumComponentLists(Sizes.NumComponentLists),
        NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) {
    if (MapperQualifierLocPtr)
      MapperQualifierLoc = *MapperQualifierLocPtr;
    if (MapperIdInfoPtr)
      MapperIdInfo = *MapperIdInfoPtr;
  }

  // NOTE: the accessors below address storage that the concrete clause class
  // \a T allocates via llvm::TrailingObjects; the casts to T are required for
  // getTrailingObjects() to compute the correct offsets.

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  // The 'unsigned' trailing-object array is shared: the first
  // NumUniqueDeclarations entries hold the per-declaration list counts and the
  // following NumComponentLists entries hold the cumulative list sizes.

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      // Walk declarations and component lists in lockstep; the i-th component
      // list belongs to the i-th declaration.
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// Set the nested name specifier of associated user-defined mapper.
  void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
    MapperQualifierLoc = NNSL;
  }

  /// Set the name of associated user-defined mapper.
  void setMapperIdInfo(DeclarationNameInfo MapperId) {
    MapperIdInfo = MapperId;
  }

  /// Get the user-defined mapper references that are in the trailing objects of
  /// the class. They are stored immediately after the varlist expressions.
  MutableArrayRef<Expr *> getUDMapperRefs() {
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    return llvm::makeMutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Get the user-defined mapper references that are in the trailing objects
  /// of the class.
  ArrayRef<Expr *> getUDMapperRefs() const {
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    return llvm::makeArrayRef<Expr *>(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Set the user-defined mappers that are in the trailing objects of the
  /// class.
  void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
    assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
           "Unexpected number of user-defined mappers.");
    assert(SupportsMapper &&
           "Must be a clause that is possible to have user-defined mappers");
    std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
  }

public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }

  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }

  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }

  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

  /// Iterator that browses the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Whether this clause is possible to have user-defined mappers associated.
    const bool SupportsMapper;

    // The user-defined mapper associated with the current declaration.
    ArrayRef<Expr *>::iterator MapperCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          SupportsMapper(SupportsMapper),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
      if (SupportsMapper)
        MapperCur = Mappers.begin();
    }

    /// Construct an iterator that scans lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components,
                                         SupportsMapper, Mappers) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;

        if (SupportsMapper)
          ++MapperCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that starts the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      const ValueDecl *Mapper = nullptr;
      if (SupportsMapper && *MapperCur)
        Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl());
      return std::make_tuple(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize),
          Mapper);
    }
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          if (SupportsMapper)
            ++MapperCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }
      ++ListSizeCur;
      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()),
        SupportsMapper, llvm::None);
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};

/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
    // Callers must provide exactly NumberOfOMPMapClauseModifiers entries (the
    // member arrays are fixed-size).
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    // Only 'to' and 'tofrom' maps transfer values into the region, so only
    // those kinds expose the list expressions as used children.
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPThreadLimitClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPThreadLimitClause() : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return ThreadLimit number. Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); } /// Return ThreadLimit number. Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); } child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); } const_child_range children() const { return const_child_range(&ThreadLimit, &ThreadLimit + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_thread_limit; } }; /// This represents 'priority' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task priority(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'priority' with /// single expression 'n'. 
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Priority number. Stmt *Priority = nullptr; /// Set the Priority number. /// /// \param E Priority number. void setPriority(Expr *E) { Priority = E; } public: /// Build 'priority' clause. /// /// \param Priority Expression associated with this clause. /// \param HelperPriority Helper priority for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPPriorityClause(Expr *Priority, Stmt *HelperPriority, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) { setPreInitStmt(HelperPriority, CaptureRegion); } /// Build an empty clause. OMPPriorityClause() : OMPClause(llvm::omp::OMPC_priority, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return Priority number. Expr *getPriority() { return cast<Expr>(Priority); } /// Return Priority number. 
Expr *getPriority() const { return cast<Expr>(Priority); }

child_range children() { return child_range(&Priority, &Priority + 1); }

const_child_range children() const {
  return const_child_range(&Priority, &Priority + 1);
}

child_range used_children();
const_child_range used_children() const {
  auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_priority;
}
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression of the 'grainsize' clause.
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion,
                     SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }

/// Return the grainsize expression (may be null for an empty clause).
Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

const_child_range children() const {
  return const_child_range(&Grainsize, &Grainsize + 1);
}

child_range used_children();
const_child_range used_children() const {
  auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_grainsize;
}
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
/// /// \code /// #pragma omp taskloop num_tasks(4) /// \endcode /// In this example directive '#pragma omp taskloop' has clause 'num_tasks' /// with single expression '4'. class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *NumTasks = nullptr; /// Set safelen. void setNumTasks(Expr *Size) { NumTasks = Size; } public: /// Build 'num_tasks' clause. /// /// \param Size Expression associated with this clause. /// \param HelperSize Helper grainsize for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNumTasksClause(Expr *Size, Stmt *HelperSize, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) { setPreInitStmt(HelperSize, CaptureRegion); } /// Build an empty clause. explicit OMPNumTasksClause() : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. 
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

const_child_range children() const {
  return const_child_range(&NumTasks, &NumTasks + 1);
}

child_range used_children();
const_child_range used_children() const {
  auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
}
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression of the clause (may be null for an empty
  /// clause).
Expr *getHint() const { return cast_or_null<Expr>(Hint); }

child_range children() { return child_range(&Hint, &Hint + 1); }

const_child_range children() const {
  return const_child_range(&Hint, &Hint + 1);
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == llvm::omp::OMPC_hint;
}
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
/// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind DistSchedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize) : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); } /// Build an empty clause. explicit OMPDistScheduleClause() : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Get kind of the clause. OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getDistScheduleKindLoc() { return KindLoc; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. 
const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPDistScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dist_schedule; } }; /// This represents 'defaultmap' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp target defaultmap(tofrom: scalar) /// \endcode /// In this example directive '#pragma omp target' has 'defaultmap' clause of kind /// 'scalar' with modifier 'tofrom'. class OMPDefaultmapClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Modifiers for 'defaultmap' clause. OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown; /// Locations of modifiers. SourceLocation ModifierLoc; /// A kind of the 'defaultmap' clause. OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown; /// Start location of the defaultmap kind in source code. SourceLocation KindLoc; /// Set defaultmap kind. /// /// \param K Defaultmap kind. void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; } /// Set the defaultmap modifier. /// /// \param M Defaultmap modifier. void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) { Modifier = M; } /// Set location of the defaultmap modifier. void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set defaultmap kind start location. 
/// /// \param KLoc Defaultmap kind location. void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } public: /// Build 'defaultmap' clause with defaultmap kind \a Kind /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param EndLoc Ending location of the clause. /// \param Kind Defaultmap kind. /// \param M The modifier applied to 'defaultmap' clause. /// \param MLoc Location of the modifier OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KLoc, SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind, OpenMPDefaultmapClauseModifier M) : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {} /// Build an empty clause. explicit OMPDefaultmapClause() : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(), SourceLocation()) {} /// Get kind of the clause. OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; } /// Get the modifier of the clause. OpenMPDefaultmapClauseModifier getDefaultmapModifier() const { return Modifier; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getDefaultmapKindLoc() { return KindLoc; } /// Get the modifier location. 
SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_defaultmap; } }; /// This represents clause 'to' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target update to(a,b) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' /// with the variables 'a' and 'b'. class OMPToClause final : public OMPMappableExprListClause<OMPToClause>, private llvm::TrailingObjects< OMPToClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Motion-modifiers for the 'to' clause. OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = { OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown}; /// Location of motion-modifiers for the 'to' clause. SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers]; /// Colon location. SourceLocation ColonLoc; /// Build clause with number of variables \a NumVars. /// /// \param TheMotionModifiers Motion-modifiers. /// \param TheMotionModifiersLoc Locations of motion-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param Locs Locations needed to build a mappable clause. 
It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers, ArrayRef<SourceLocation> TheMotionModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo) { assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() && "Unexpected number of motion modifiers."); llvm::copy(TheMotionModifiers, std::begin(MotionModifiers)); assert(llvm::array_lengthof(MotionModifiersLoc) == TheMotionModifiersLoc.size() && "Unexpected number of motion modifier locations."); llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set motion-modifier for the clause. /// /// \param I index for motion-modifier. /// \param T motion-modifier for the clause. 
void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) { assert(I < NumberOfOMPMotionModifiers && "Unexpected index to store motion modifier, exceeds array size."); MotionModifiers[I] = T; } /// Set location for the motion-modifier. /// /// \param I index for motion-modifier location. /// \param TLoc motion-modifier location. void setMotionModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMotionModifiers && "Index to store motion modifier location exceeds array size."); MotionModifiersLoc[I] = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// \param MotionModifiers Motion-modifiers. /// \param MotionModifiersLoc Location of motion-modifiers. /// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. 
/// \param MapperId The identifier of associated user-defined mapper. static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPToClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches the motion-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for motion-modifier. OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier exceeds the total number of modifiers."); return MotionModifiers[Cnt]; } /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers' /// locations. /// /// \param Cnt index for motion-modifier location. SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier location exceeds total number of modifiers."); return MotionModifiersLoc[Cnt]; } /// Fetches ArrayRef of motion-modifiers. ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiers); } /// Fetches ArrayRef of location of motion-modifiers. 
ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiersLoc); } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPToClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_to; } }; /// This represents clause 'from' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target update from(a,b) /// \endcode /// In this example directive '#pragma omp target update' has clause 'from' /// with the variables 'a' and 'b'. class OMPFromClause final : public OMPMappableExprListClause<OMPFromClause>, private llvm::TrailingObjects< OMPFromClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Motion-modifiers for the 'from' clause. OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = { OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown}; /// Location of motion-modifiers for the 'from' clause. SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers]; /// Colon location. SourceLocation ColonLoc; /// Build clause with number of variables \a NumVars. /// /// \param TheMotionModifiers Motion-modifiers. /// \param TheMotionModifiersLoc Locations of motion-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. 
/// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers, ArrayRef<SourceLocation> TheMotionModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo) { assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() && "Unexpected number of motion modifiers."); llvm::copy(TheMotionModifiers, std::begin(MotionModifiers)); assert(llvm::array_lengthof(MotionModifiersLoc) == TheMotionModifiersLoc.size() && "Unexpected number of motion modifier locations."); llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set motion-modifier for the clause. /// /// \param I index for motion-modifier. /// \param T motion-modifier for the clause. void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) { assert(I < NumberOfOMPMotionModifiers && "Unexpected index to store motion modifier, exceeds array size."); MotionModifiers[I] = T; } /// Set location for the motion-modifier. /// /// \param I index for motion-modifier location. /// \param TLoc motion-modifier location. void setMotionModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMotionModifiers && "Index to store motion modifier location exceeds array size."); MotionModifiersLoc[I] = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// \param MotionModifiers Motion-modifiers. 
/// \param MotionModifiersLoc Location of motion-modifiers. /// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperId The identifier of associated user-defined mapper. static OMPFromClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPFromClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches the motion-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for motion-modifier. OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier exceeds the total number of modifiers."); return MotionModifiers[Cnt]; } /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers' /// locations. /// /// \param Cnt index for motion-modifier location. 
SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier location exceeds total number of modifiers."); return MotionModifiersLoc[Cnt]; } /// Fetches ArrayRef of motion-modifiers. ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiers); } /// Fetches ArrayRef of location of motion-modifiers. ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiersLoc); } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFromClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_from; } }; /// This represents clause 'use_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target data use_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target data' has clause /// 'use_device_ptr' with the variables 'a' and 'b'. class OMPUseDevicePtrClause final : public OMPMappableExprListClause<OMPUseDevicePtrClause>, private llvm::TrailingObjects< OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. 
It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs, Sizes) { } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return 3 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } /// Sets the list of references to private copies with initializers for new /// private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for new /// private variables. 
MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new private /// variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new private /// variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param PrivateVars Expressions referring to private copies. /// \param Inits Expressions referring to private copy initializers. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPUseDevicePtrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPUseDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == 
llvm::omp::OMPC_use_device_ptr; } }; /// This represents clause 'use_device_addr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target data use_device_addr(a,b) /// \endcode /// In this example directive '#pragma omp target data' has clause /// 'use_device_addr' with the variables 'a' and 'b'. class OMPUseDeviceAddrClause final : public OMPMappableExprListClause<OMPUseDeviceAddrClause>, private llvm::TrailingObjects< OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs, Sizes) {} /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPUseDeviceAddrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
static OMPUseDeviceAddrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_use_device_addr; } }; /// This represents clause 'is_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target is_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target' has clause /// 'is_device_ptr' with the variables 'a' and 'b'. class OMPIsDevicePtrClause final : public OMPMappableExprListClause<OMPIsDevicePtrClause>, private llvm::TrailingObjects< OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {} /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPIsDevicePtrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. 
/// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPIsDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr; } }; /// This represents clause 'nontemporal' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp simd nontemporal(a) /// \endcode /// In this example directive '#pragma omp simd' has clause 'nontemporal' for /// the variable 'a'. class OMPNontemporalClause final : public OMPVarListClause<OMPNontemporalClause>, private llvm::TrailingObjects<OMPNontemporalClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
  OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal,
                                               StartLoc, LParenLoc, EndLoc, N) {
  }

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPNontemporalClause(unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(
            llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Get the list of privatized copies if the member expression was captured
  /// by one of the privatization clauses. Stored in the trailing storage,
  /// immediately after the varlist_size() listed variables.
  MutableArrayRef<Expr *> getPrivateRefs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateRefs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPNontemporalClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Sets the list of references to private copies created in private clauses.
  /// \param VL List of references.
  void setPrivateRefs(ArrayRef<Expr *> VL);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }

  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
  }
};

/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'order' clause.
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc,
                 SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPOrderClause()
      : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPOrderClauseKind getKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_order;
  }
};

/// This represents 'destroy' clause in the '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has 'destroy' clause.
class OMPDestroyClause final : public OMPClause {
public:
  /// Build 'destroy' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDestroyClause()
      : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_destroy;
  }
};

/// This represents 'detach' clause in the '#pragma omp task' directive.
///
/// \code
/// #pragma omp task detach(evt)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'detach' clause
/// with the variable 'evt'.
class OMPDetachClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression of the 'detach' clause.
  Stmt *Evt = nullptr;

  /// Sets the event-handler expression of the clause.
  void setEventHandler(Expr *E) { Evt = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'detach' clause with event-handler \a Evt.
  ///
  /// \param Evt Event handler expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Evt(Evt) {}

  /// Build an empty clause.
  OMPDetachClause()
      : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns event-handler expression.
  Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }

  // The event-handler expression is the only child of this clause.
  child_range children() { return child_range(&Evt, &Evt + 1); }
  const_child_range children() const {
    return const_child_range(&Evt, &Evt + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_detach;
  }
};

/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive'
/// with the variables 'a' and 'b'.
class OMPInclusiveClause final
    : public OMPVarListClause<OMPInclusiveClause>,
      private llvm::TrailingObjects<OMPInclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInclusiveClause(unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPInclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_inclusive;
  }
};

/// This represents clause 'exclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan exclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'exclusive'
/// with the variables 'a' and 'b'.
class OMPExclusiveClause final
    : public OMPVarListClause<OMPExclusiveClause>,
      private llvm::TrailingObjects<OMPExclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPExclusiveClause(unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPExclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPExclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_exclusive;
  }
};

/// This represents clause 'uses_allocators' in the '#pragma omp target'-based
/// directives.
///
/// \code
/// #pragma omp target uses_allocators(default_allocator, my_allocator(traits))
/// \endcode
/// In this example directive '#pragma omp target' has clause 'uses_allocators'
/// with the allocators 'default_allocator' and user-defined 'my_allocator'.
class OMPUsesAllocatorsClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *,
                                    SourceLocation> {
public:
  /// Data for list of allocators.
  struct Data {
    /// Allocator.
    Expr *Allocator = nullptr;
    /// Allocator traits.
    Expr *AllocatorTraits = nullptr;
    /// Locations of '(' and ')' symbols.
    SourceLocation LParenLoc, RParenLoc;
  };

private:
  friend class OMPClauseReader;
  friend TrailingObjects;

  // Per-allocator layout of the trailing Expr * array: allocator expression
  // followed by its (optional) traits expression.
  enum class ExprOffsets {
    Allocator,
    AllocatorTraits,
    Total,
  };

  // Per-allocator layout of the trailing SourceLocation array.
  enum class ParenLocsOffsets {
    LParen,
    RParen,
    Total,
  };

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Total number of allocators in the clause.
  unsigned NumOfAllocators = 0;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of allocators associated with the clause.
  OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc, unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumOfAllocators(N) {}

  /// Build an empty clause.
  /// \param N Number of allocators associated with the clause.
  ///
  explicit OMPUsesAllocatorsClause(unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
                  SourceLocation()),
        NumOfAllocators(N) {}

  unsigned numTrailingObjects(OverloadToken<Expr *>) const {
    return NumOfAllocators * static_cast<int>(ExprOffsets::Total);
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the allocators data for the clause.
  void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data);

public:
  /// Creates clause with a list of allocators \p Data.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Data List of allocators.
  static OMPUsesAllocatorsClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data);

  /// Creates an empty clause with the place for \p N allocators.
  ///
  /// \param C AST context.
  /// \param N The number of allocators.
  static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of allocators associated with the clause.
  unsigned getNumberOfAllocators() const { return NumOfAllocators; }

  /// Returns data for the specified allocator.
  OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const;

  // Iterators
  child_range children() {
    Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
    return child_range(Begin, Begin + NumOfAllocators *
                                          static_cast<int>(ExprOffsets::Total));
  }
  const_child_range children() const {
    Stmt *const *Begin =
        reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
    return const_child_range(
        Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total));
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_uses_allocators;
  }
};

/// This represents clause 'affinity' in the '#pragma omp task'-based
/// directives.
///
/// \code
/// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i])
/// \endcode
/// In this example directive '#pragma omp task' has clause 'affinity' with the
/// affinity modifier 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]'
/// and 'c[i]'.
class OMPAffinityClause final
    : public OMPVarListClause<OMPAffinityClause>,
      private llvm::TrailingObjects<OMPAffinityClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':' symbol.
  SourceLocation ColonLoc;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of locators associated with the clause.
  OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc,
                                            LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  /// \param N Number of locators associated with the clause.
  ///
  explicit OMPAffinityClause(unsigned N)
      : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity,
                                            SourceLocation(), SourceLocation(),
                                            SourceLocation(), N) {}

  /// Sets the affinity modifier for the clause, if any. The modifier is stored
  /// in the trailing storage right after the varlist_size() locator items.
  void setModifier(Expr *E) {
    getTrailingObjects<Expr *>()[varlist_size()] = E;
  }

  /// Sets the location of ':' symbol.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a modifier and a list of locator items.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param Modifier Affinity modifier expression, if any.
  /// \param Locators List of locator items.
  static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, Expr *Modifier,
                                   ArrayRef<Expr *> Locators);

  /// Creates an empty clause with the place for \p N locator items.
  ///
  /// \param C AST context.
  /// \param N The number of locator items.
  static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets affinity modifier.
  Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; }
  Expr *getModifier() const {
    return getTrailingObjects<Expr *>()[varlist_size()];
  }

  /// Gets the location of ':' symbol.
  SourceLocation getColonLoc() const { return ColonLoc; }

  // Iterators
  child_range children() {
    int Offset = getModifier() ?
1 : 0; return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + Offset)); } const_child_range children() const { auto Children = const_cast<OMPAffinityClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_affinity; } }; /// This class implements a simple visitor for OMPClause /// subclasses. template<class ImplClass, template <typename> class Ptr, typename RetTy> class OMPClauseVisitorBase { public: #define PTR(CLASS) Ptr<CLASS> #define DISPATCH(CLASS) \ return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S)) #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) \ RetTy Visit##Class(PTR(Class) S) { DISPATCH(Class); } #include "llvm/Frontend/OpenMP/OMP.inc" RetTy Visit(PTR(OMPClause) S) { // Top switch clause: visit each OMPClause. switch (S->getClauseKind()) { #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) \ case llvm::omp::Clause::Enum: \ return Visit##Class(static_cast<PTR(Class)>(S)); #define CLAUSE_NO_CLASS(Enum, Str) \ case llvm::omp::Clause::Enum: \ break; #include "llvm/Frontend/OpenMP/OMP.inc" } } // Base case, ignore it. 
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
// Keep the helper macros local to OMPClauseVisitorBase.
#undef PTR
#undef DISPATCH
};

/// Helper alias producing `const T *` for the const visitor below.
template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>;

/// Visitor over mutable clause pointers.
template <class ImplClass, typename RetTy = void>
class OMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};

/// Visitor over const clause pointers.
template <class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, const_ptr, RetTy> {};

/// Pretty-prints OpenMP clauses to a raw_ostream using a PrintingPolicy.
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);
  /// Process motion clauses.
  template <typename T> void VisitOMPMotionClause(T *Node);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

  // One printer method is declared per generated clause class.
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S);
#include "llvm/Frontend/OpenMP/OMP.inc"
};

/// A single property of a context-selector trait.
struct OMPTraitProperty {
  llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;

  /// The raw string as we parsed it. This is needed for the `isa` trait set
  /// (which accepts anything) and (later) extensions.
  StringRef RawString;
};

/// A trait selector: kind, optional score/condition, and its properties.
struct OMPTraitSelector {
  Expr *ScoreOrCondition = nullptr;
  llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
  llvm::SmallVector<OMPTraitProperty, 1> Properties;
};

/// A trait set: kind plus an ordered collection of selectors.
struct OMPTraitSet {
  llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
  llvm::SmallVector<OMPTraitSelector, 2> Selectors;
};

/// Helper data structure representing the traits in a match clause of an
/// `declare variant` or `metadirective`. The outer level is an ordered
/// collection of selector sets, each with an associated kind and an ordered
/// collection of selectors. A selector has a kind, an optional score/condition,
/// and an ordered collection of properties.
class OMPTraitInfo { /// Private constructor accesible only by ASTContext. OMPTraitInfo() {} friend class ASTContext; public: /// Reconstruct a (partial) OMPTraitInfo object from a mangled name. OMPTraitInfo(StringRef MangledName); /// The outermost level of selector sets. llvm::SmallVector<OMPTraitSet, 2> Sets; bool anyScoreOrCondition( llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) { return llvm::any_of(Sets, [&](OMPTraitSet &Set) { return llvm::any_of( Set.Selectors, [&](OMPTraitSelector &Selector) { return Cond(Selector.ScoreOrCondition, /* IsScore */ Selector.Kind != llvm::omp::TraitSelector::user_condition); }); }); } /// Create a variant match info object from this trait info object. While the /// former is a flat representation the actual main difference is that the /// latter uses clang::Expr to store the score/condition while the former is /// independent of clang. Thus, expressions and conditions are evaluated in /// this method. void getAsVariantMatchInfo(ASTContext &ASTCtx, llvm::omp::VariantMatchInfo &VMI) const; /// Return a string representation identifying this context selector. std::string getMangledName() const; /// Check the extension trait \p TP is active. bool isExtensionActive(llvm::omp::TraitProperty TP) { for (const OMPTraitSet &Set : Sets) { if (Set.Kind != llvm::omp::TraitSet::implementation) continue; for (const OMPTraitSelector &Selector : Set.Selectors) { if (Selector.Kind != llvm::omp::TraitSelector::implementation_extension) continue; for (const OMPTraitProperty &Property : Selector.Properties) { if (Property.Kind == TP) return true; } } } return false; } /// Print a human readable representation into \p OS. void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const; }; llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI); llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI); /// Clang specific specialization of the OMPContext to lookup target features. 
struct TargetOMPContext final : public llvm::omp::OMPContext {
  TargetOMPContext(ASTContext &ASTCtx,
                   std::function<void(StringRef)> &&DiagUnknownTrait,
                   const FunctionDecl *CurrentFunctionDecl);
  virtual ~TargetOMPContext() = default;

  /// See llvm::omp::OMPContext::matchesISATrait
  bool matchesISATrait(StringRef RawString) const override;

private:
  std::function<bool(StringRef)> FeatureValidityCheck;
  std::function<void(StringRef)> DiagUnknownTrait;
  llvm::StringMap<bool> FeatureMap;
};

/// Contains data for OpenMP directives: clauses, children
/// expressions/statements (helpers for codegen) and associated statement, if
/// any.
class OMPChildren final
    : private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> {
  friend TrailingObjects;
  friend class OMPClauseReader;
  friend class OMPExecutableDirective;
  template <typename T> friend class OMPDeclarativeDirective;

  /// Number of clauses.
  unsigned NumClauses = 0;
  /// Number of child expressions/stmts.
  unsigned NumChildren = 0;
  /// true if the directive has associated statement.
  /// When set, the statement is stored in the trailing Stmt* array at index
  /// NumChildren (i.e. one extra slot past the children).
  bool HasAssociatedStmt = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<OMPClause *>) const {
    return NumClauses;
  }

  OMPChildren() = delete;

  OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt)
      : NumClauses(NumClauses), NumChildren(NumChildren),
        HasAssociatedStmt(HasAssociatedStmt) {}

  /// Bytes needed for an OMPChildren with the given configuration.
  static size_t size(unsigned NumClauses, bool HasAssociatedStmt,
                     unsigned NumChildren);

  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses);
  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S,
                             unsigned NumChildren = 0);
  static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses,
                                  bool HasAssociatedStmt = false,
                                  unsigned NumChildren = 0);

public:
  unsigned getNumClauses() const { return NumClauses; }
  unsigned getNumChildren() const { return NumChildren; }
  bool hasAssociatedStmt() const { return HasAssociatedStmt; }

  /// Set associated statement.
  void setAssociatedStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[NumChildren] = S;
  }

  void setChildren(ArrayRef<Stmt *> Children);

  /// Sets the list of clauses for this directive.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPChildren *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    return getTrailingObjects<Stmt *>()[NumChildren];
  }

  /// Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(),
                                     NumClauses);
  }
  ArrayRef<OMPClause *> getClauses() const {
    return const_cast<OMPChildren *>(this)->getClauses();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *
  getCapturedStmt(OpenMPDirectiveKind RegionKind,
                  ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
    assert(llvm::any_of(
               CaptureRegions,
               [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    // Capture regions are nested CapturedStmts; peel one layer per region
    // until the requested one is reached.
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *
  getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) {
    assert(hasAssociatedStmt() && "Must have associated captured statement.");
    assert(!CaptureRegions.empty() &&
           "At least one captured statement must be provided.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    return CS;
  }

  const CapturedStmt *
  getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
    return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt(
        CaptureRegions);
  }

  MutableArrayRef<Stmt *> getChildren();
  ArrayRef<Stmt *> getChildren() const {
    return const_cast<OMPChildren *>(this)->getChildren();
  }

  /// Returns the associated statement with all CapturedStmt wrappers
  /// stripped off; the associated statement itself if it is not captured.
  Stmt *getRawStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) {
      Stmt *S = nullptr;
      do {
        S = CS->getCapturedStmt();
        CS = dyn_cast<CapturedStmt>(S);
      } while (CS);
      return S;
    }
    return getAssociatedStmt();
  }
  const Stmt *getRawStmt() const {
    return const_cast<OMPChildren *>(this)->getRawStmt();
  }

  /// Child range covering just the associated-statement slot (empty when the
  /// directive has no associated statement).
  Stmt::child_range getAssociatedStmtAsRange() {
    if (!HasAssociatedStmt)
      return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator());
    return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren],
                             &getTrailingObjects<Stmt *>()[NumChildren + 1]);
  }
};

} // namespace clang

#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
/* ===== file: ch_ompss.c ===== */
#include "../common/ch_common.h"
#include "../timing.h"
// #include "../timing_override.h"

#if CHAMELEON
/* Creates a Chameleon task for kernel `entry` with argument list `args`
 * (ownership of `args` is taken and released here), submits it, and yields
 * the calling thread until the task has finished. The return value of
 * chameleon_add_task is intentionally ignored, as in the original code. */
static void cham_run_and_wait(void *entry, chameleon_map_data_entry_t *args, int32_t num_args)
{
    cham_migratable_task_t *task = chameleon_create_task(entry, num_args, args);
    (void)chameleon_add_task(task);
    free(args);
    TYPE_TASK_ID task_id = chameleon_get_last_local_task_id_added();
    while (!chameleon_local_task_has_finished(task_id)) {
        chameleon_taskyield();
    }
}

/* Builds the argument list shared by all BLAS-kernel tasks (potrf/trsm/
 * gemm/syrk): `nbuf` (1..3) tile buffers, all mapped TO and the last one
 * additionally FROM (it is the output tile), followed by the tile size
 * twice as LITERAL arguments; then runs the task via cham_run_and_wait(). */
static void cham_run_kernel(void *entry, int nbuf, double *buf0, double *buf1,
                            double *buf2, void *literal_ts, int ts)
{
    double *bufs[3] = { buf0, buf1, buf2 };
    int32_t num_args = (int32_t)(nbuf + 2);
    chameleon_map_data_entry_t *args =
        (chameleon_map_data_entry_t *)malloc(num_args * sizeof(chameleon_map_data_entry_t));
    for (int a = 0; a < nbuf; a++) {
        int type = CHAM_OMP_TGT_MAPTYPE_TO;
        if (a == nbuf - 1)
            type |= CHAM_OMP_TGT_MAPTYPE_FROM; // last buffer is the result tile
        args[a] = chameleon_map_data_entry_create(bufs[a], ts*ts*sizeof(double), type);
    }
    args[nbuf] = chameleon_map_data_entry_create(literal_ts, sizeof(void*),
                     CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
    args[nbuf + 1] = chameleon_map_data_entry_create(literal_ts, sizeof(void*),
                     CHAM_OMP_TGT_MAPTYPE_TO | CHAM_OMP_TGT_MAPTYPE_LITERAL);
    cham_run_and_wait(entry, args, num_args);
}
#endif

/* Distributed task-parallel right-looking blocked Cholesky factorization.
 *
 * ts         - tile size (tiles are ts x ts doubles)
 * nt         - number of tiles per matrix dimension
 * A          - nt x nt array of local tile pointers
 * B          - receive buffer for the remote diagonal tile of the panel
 * C          - per-row receive buffers for remote panel tiles
 * block_rank - owner rank of each tile (row-major, nt*nt entries)
 *
 * Communication is expressed as OpenMP tasks serialized against the compute
 * tasks through the `comm_sentinel` dependency. When CHAMELEON is enabled,
 * the BLAS kernels are executed as (migratable) Chameleon tasks instead. */
void cholesky_mpi(const int ts, const int nt, double * SPEC_RESTRICT A[nt][nt],
                  double * SPEC_RESTRICT B, double * SPEC_RESTRICT C[nt], int *block_rank)
{
    TASK_DEPTH_INIT;
#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
    #pragma omp parallel
    {
        chameleon_thread_init();
    }
    // necessary to be aware of binary base addresses to calculate offset for target entry functions
    chameleon_determine_base_addresses((void *)&cholesky_mpi);
    chameleon_post_init_serial();
    // pass ts by value through a void* "literal" task argument
    void* literal_ts = *(void**)(&ts);
#endif
#ifdef USE_TIMING
#if !defined(OMPSS_VER)
    #pragma omp parallel
    #pragma omp master
#endif
    INIT_TIMING(omp_get_num_threads());
#endif
    START_TIMING(TIME_TOTAL);
#if !defined(OMPSS_VER)
    #pragma omp parallel
    {
    #pragma omp single nowait
#endif
    {
        {
        START_TIMING(TIME_CREATE);
        for (int k = 0; k < nt; k++) {
            double * SPEC_RESTRICT tmp_a_k_k = A[k][k];
            // Factorize the diagonal tile (owner only).
            if (block_rank[k*nt+k] == mype) {
                #pragma omp task depend(out: A[k][k]) firstprivate(k)
                {
                    TASK_DEPTH_INCR;
                    DEBUG_PRINT("Computing omp_potrf[%03d][%03d] - Start\n", k, k);
#if CHAMELEON
                    cham_run_kernel((void *)&omp_potrf, 1, tmp_a_k_k, NULL, NULL, literal_ts, ts);
#else
                    omp_potrf(tmp_a_k_k, ts, ts);
#endif
                    DEBUG_PRINT("Computing omp_potrf[%03d][%03d] - End\n", k, k);
                    TASK_DEPTH_DECR;
                }
            }

            int comm_sentinel = 0; // <-- sentinel, never actually referenced
            if (block_rank[k*nt+k] == mype && np != 1) {
                // use comm_sentinel to make sure this task runs before the communication tasks below
                #pragma omp task depend(in: A[k][k], comm_sentinel) firstprivate(k) untied
                {
                    TASK_DEPTH_INCR;
                    DEBUG_PRINT("Sending diagonal[%03d][%03d] - Start\n", k, k);
                    START_TIMING(TIME_COMM);
                    MPI_Request *reqs = NULL;
                    int nreqs = 0;
                    char send_flags[np];
                    reset_send_flags(send_flags);
                    // Count distinct destination ranks that own tiles of row k.
                    for (int kk = k+1; kk < nt; kk++) {
                        if (!send_flags[block_rank[k*nt+kk]]) {
                            ++nreqs;
                            send_flags[block_rank[k*nt+kk]] = 1;
                        }
                    }
                    reqs = malloc(sizeof(MPI_Request)*nreqs);
                    nreqs = 0;
                    for (int dst = 0; dst < np; dst++) {
                        if (send_flags[dst] && dst != mype) {
                            MPI_Request send_req;
                            MPI_Isend(A[k][k], ts*ts, MPI_DOUBLE, dst, k*nt+k, MPI_COMM_WORLD, &send_req);
                            reqs[nreqs++] = send_req;
                        }
                    }
                    waitall(reqs, nreqs);
                    free(reqs);
                    END_TIMING(TIME_COMM);
                    DEBUG_PRINT("Sending diagonal[%03d][%03d] - End\n", k, k);
                    TASK_DEPTH_DECR;
                }
            } else if (block_rank[k*nt+k] != mype) {
                // use comm_sentinel to make sure this task runs before the communication tasks below
                #pragma omp task depend(out: B) depend(in:comm_sentinel) firstprivate(k) untied
                {
                    TASK_DEPTH_INCR;
                    DEBUG_PRINT("Receiving diagonal[%03d][%03d] - Start\n", k, k);
                    START_TIMING(TIME_COMM);
                    // Only receive if this rank owns at least one tile of row k.
                    int recv_flag = 0;
                    for (int i = k + 1; i < nt; i++) {
                        if (block_rank[k*nt+i] == mype) {
                            recv_flag = 1;
                            break;
                        }
                    }
                    if (recv_flag) {
                        MPI_Request recv_req;
                        MPI_Irecv(B, ts*ts, MPI_DOUBLE, block_rank[k*nt+k], k*nt+k, MPI_COMM_WORLD, &recv_req);
                        waitall(&recv_req, 1);
                    }
                    END_TIMING(TIME_COMM);
                    DEBUG_PRINT("Receiving diagonal[%03d][%03d] - End\n", k, k);
                    TASK_DEPTH_DECR;
                }
            }

            // Triangular solves against the diagonal tile (local or received in B).
            double * SPEC_RESTRICT tmp_b = B;
            for (int i = k + 1; i < nt; i++) {
                double * SPEC_RESTRICT tmp_a_k_i = A[k][i];
                if (block_rank[k*nt+i] == mype) {
                    if (block_rank[k*nt+k] == mype) {
                        #pragma omp task depend(in: A[k][k], comm_sentinel) depend(out: A[k][i]) firstprivate(k, i)
                        {
                            TASK_DEPTH_INCR;
                            DEBUG_PRINT("Computing omp_trsm[%03d][%03d] - Start\n", k, i);
#if CHAMELEON
                            cham_run_kernel((void *)&omp_trsm, 2, tmp_a_k_k, tmp_a_k_i, NULL, literal_ts, ts);
#else
                            omp_trsm(tmp_a_k_k, tmp_a_k_i, ts, ts);
#endif
                            DEBUG_PRINT("Computing omp_trsm[%03d][%03d] - End\n", k, i);
                            TASK_DEPTH_DECR;
                        }
                    } else {
                        #pragma omp task depend(in: B, comm_sentinel) depend(out: A[k][i]) firstprivate(k, i)
                        {
                            TASK_DEPTH_INCR;
                            DEBUG_PRINT("Computing omp_trsm[%03d][%03d] - Start\n", k, i);
#if CHAMELEON
                            cham_run_kernel((void *)&omp_trsm, 2, tmp_b, tmp_a_k_i, NULL, literal_ts, ts);
#else
                            omp_trsm(tmp_b, tmp_a_k_i, ts, ts);
#endif
                            DEBUG_PRINT("Computing omp_trsm[%03d][%03d] - End\n", k, i);
                            TASK_DEPTH_DECR;
                        }
                    }
                }
            }

            // Exchange the updated panel tiles needed by the trailing update.
            #pragma omp task depend(inout: comm_sentinel) firstprivate(k) shared(A) untied
            {
                TASK_DEPTH_INCR;
                DEBUG_PRINT("Send/Recv omp_trsm[%03d] - Start\n", k);
                START_TIMING(TIME_COMM);
                char send_flags[np];
                reset_send_flags(send_flags);
                int nreqs = 0;
                // upper bound in case all our blocks have to be sent
                int max_req = (nt-k)*(np-1);
                MPI_Request *reqs = malloc(sizeof(*reqs)*max_req);
                for (int i = k + 1; i < nt; i++) {
                    if (block_rank[k*nt+i] == mype && np != 1) {
                        // Ranks owning tiles updated from A[k][i] need a copy of it.
                        for (int ii = k + 1; ii < i; ii++) {
                            if (!send_flags[block_rank[ii*nt+i]]) {
                                send_flags[block_rank[ii*nt+i]] = 1;
                            }
                        }
                        for (int ii = i + 1; ii < nt; ii++) {
                            if (!send_flags[block_rank[i*nt+ii]]) {
                                send_flags[block_rank[i*nt+ii]] = 1;
                            }
                        }
                        if (!send_flags[block_rank[i*nt+i]]) send_flags[block_rank[i*nt+i]] = 1;
                        for (int dst = 0; dst < np; dst++) {
                            if (send_flags[dst] && dst != mype) {
                                MPI_Request send_req;
                                MPI_Isend(A[k][i], ts*ts, MPI_DOUBLE, dst, k*nt+i, MPI_COMM_WORLD, &send_req);
                                reqs[nreqs++] = send_req;
                            }
                        }
                        reset_send_flags(send_flags);
                    }
                    if (block_rank[k*nt+i] != mype) {
                        // Receive A[k][i] into C[i] if any local tile needs it.
                        int recv_flag = 0;
                        for (int ii = k + 1; ii < i; ii++) {
                            if (block_rank[ii*nt+i] == mype) recv_flag = 1;
                        }
                        for (int ii = i + 1; ii < nt; ii++) {
                            if (block_rank[i*nt+ii] == mype) recv_flag = 1;
                        }
                        if (block_rank[i*nt+i] == mype) recv_flag = 1;
                        if (recv_flag) {
                            MPI_Request recv_req;
                            MPI_Irecv(C[i], ts*ts, MPI_DOUBLE, block_rank[k*nt+i], k*nt+i, MPI_COMM_WORLD, &recv_req);
                            reqs[nreqs++] = recv_req;
                        }
                    }
                }
                waitall(reqs, nreqs);
                free(reqs);
                END_TIMING(TIME_COMM);
                DEBUG_PRINT("Send/Recv omp_trsm[%03d] - End\n", k);
                TASK_DEPTH_DECR;
            }

            // Trailing-matrix update: gemm on off-diagonal tiles, syrk on diagonals.
            // Each of A[k][i] / A[k][j] is either local or was received into C[i] / C[j].
            for (int i = k + 1; i < nt; i++) {
                double * SPEC_RESTRICT tmp_a_k_i = A[k][i];
                double * SPEC_RESTRICT tmp_a_i_i = A[i][i];
                double * SPEC_RESTRICT tmp_c_i = C[i];
                for (int j = k + 1; j < i; j++) {
                    double * SPEC_RESTRICT tmp_a_k_j = A[k][j];
                    double * SPEC_RESTRICT tmp_a_j_i = A[j][i];
                    double * SPEC_RESTRICT tmp_c_j = C[j];
                    if (block_rank[j*nt+i] == mype) {
                        if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] == mype) {
                            #pragma omp task depend(in: A[k][i], A[k][j], comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
                            {
                                TASK_DEPTH_INCR;
                                DEBUG_PRINT("Computing omp_gemm[%03d][%03d] - Start\n", j, i);
#if CHAMELEON
                                cham_run_kernel((void *)&omp_gemm, 3, tmp_a_k_i, tmp_a_k_j, tmp_a_j_i, literal_ts, ts);
#else
                                omp_gemm(tmp_a_k_i, tmp_a_k_j, tmp_a_j_i, ts, ts);
#endif
                                DEBUG_PRINT("Computing omp_gemm[%03d][%03d] - End\n", j, i);
                                TASK_DEPTH_DECR;
                            }
                        } else if (block_rank[k*nt+i] != mype && block_rank[k*nt+j] == mype) {
                            #pragma omp task depend(in: A[k][j], comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
                            {
                                TASK_DEPTH_INCR;
                                DEBUG_PRINT("Computing omp_gemm[%03d][%03d] - Start\n", j, i);
#if CHAMELEON
                                cham_run_kernel((void *)&omp_gemm, 3, tmp_c_i, tmp_a_k_j, tmp_a_j_i, literal_ts, ts);
#else
                                omp_gemm(tmp_c_i, tmp_a_k_j, tmp_a_j_i, ts, ts);
#endif
                                DEBUG_PRINT("Computing omp_gemm[%03d][%03d] - End\n", j, i);
                                TASK_DEPTH_DECR;
                            }
                        } else if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] != mype) {
                            #pragma omp task depend(in: A[k][i], comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
                            {
                                TASK_DEPTH_INCR;
                                DEBUG_PRINT("Computing omp_gemm[%03d][%03d] - Start\n", j, i);
#if CHAMELEON
                                cham_run_kernel((void *)&omp_gemm, 3, tmp_a_k_i, tmp_c_j, tmp_a_j_i, literal_ts, ts);
#else
                                omp_gemm(tmp_a_k_i, tmp_c_j, tmp_a_j_i, ts, ts);
#endif
                                DEBUG_PRINT("Computing omp_gemm[%03d][%03d] - End\n", j, i);
                                TASK_DEPTH_DECR;
                            }
                        } else {
                            #pragma omp task depend(in: comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
                            {
                                TASK_DEPTH_INCR;
                                DEBUG_PRINT("Computing omp_gemm[%03d][%03d] - Start\n", j, i);
#if CHAMELEON
                                cham_run_kernel((void *)&omp_gemm, 3, tmp_c_i, tmp_c_j, tmp_a_j_i, literal_ts, ts);
#else
                                omp_gemm(tmp_c_i, tmp_c_j, tmp_a_j_i, ts, ts);
#endif
                                DEBUG_PRINT("Computing omp_gemm[%03d][%03d] - End\n", j, i);
                                TASK_DEPTH_DECR;
                            }
                        }
                    }
                }

                if (block_rank[i*nt+i] == mype) {
                    if (block_rank[k*nt+i] == mype) {
                        #pragma omp task depend(in: A[k][i], comm_sentinel) depend(out: A[i][i]) firstprivate(k, i)
                        {
                            TASK_DEPTH_INCR;
                            DEBUG_PRINT("Computing omp_syrk[%03d][%03d] - Start\n", i, i);
#if CHAMELEON
                            cham_run_kernel((void *)&omp_syrk, 2, tmp_a_k_i, tmp_a_i_i, NULL, literal_ts, ts);
#else
                            omp_syrk(tmp_a_k_i, tmp_a_i_i, ts, ts);
#endif
                            DEBUG_PRINT("Computing omp_syrk[%03d][%03d] - End\n", i, i);
                            TASK_DEPTH_DECR;
                        }
                    } else {
                        #pragma omp task depend(in: comm_sentinel) depend(out: A[i][i]) firstprivate(k, i)
                        {
                            TASK_DEPTH_INCR;
                            DEBUG_PRINT("Computing omp_syrk[%03d][%03d] - Start\n", i, i);
#if CHAMELEON
                            cham_run_kernel((void *)&omp_syrk, 2, tmp_c_i, tmp_a_i_i, NULL, literal_ts, ts);
#else
                            omp_syrk(tmp_c_i, tmp_a_i_i, ts, ts);
#endif
                            DEBUG_PRINT("Computing omp_syrk[%03d][%03d] - End\n", i, i);
                            TASK_DEPTH_DECR;
                        }
                    }
                }
            }
        }
        END_TIMING(TIME_CREATE);
        }
    }// pragma omp single

#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
    chameleon_distributed_taskwait(0);
#else
    #pragma omp barrier
#endif
#if !defined(OMPSS_VER)
    #pragma omp single
#endif
    {
        MPI_Barrier(MPI_COMM_WORLD);
    }
#if !defined(OMPSS_VER)
    }// pragma omp parallel
#endif
    END_TIMING(TIME_TOTAL);
#if !defined(OMPSS_VER)
    #pragma omp parallel
    #pragma omp master
#endif
    PRINT_TIMINGS(omp_get_num_threads());
    FREE_TIMING();
#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
    chameleon_finalize();
#endif
    TASK_DEPTH_FINALIZE;
}
/* ===== file: hcb_basis_core.h ===== */
#ifndef _HCB_BASIS_CORE_H #define _HCB_BASIS_CORE_H #include <complex> #include <vector> #include <iostream> #include "general_basis_core.h" #include "numpy/ndarraytypes.h" #include "benes_perm.h" #include "openmp.h" namespace basis_general { template<class I> I inline hcb_map_bits(I s,const int map[],const int N){ I ss = 0; for(int i=N-1;i>=0;--i){ int j = map[i]; ss ^= (j<0 ? ((s&1)^1)<<(N+j) : (s&1)<<(N-j-1) ); s >>= 1; } return ss; } template<class I,class P=signed char> class hcb_basis_core : public general_basis_core<I,P> { public: std::vector<tr_benes<I>> benes_maps; std::vector<I> invs; hcb_basis_core(const int _N, const bool _fermionic=false) : \ general_basis_core<I>::general_basis_core(_N,_fermionic) {} hcb_basis_core(const int _N,const int _nt,const int _maps[], \ const int _pers[], const int _qs[], const bool _fermionic=false) : \ general_basis_core<I>::general_basis_core(_N,_nt,_maps,_pers,_qs,_fermionic) { benes_maps.resize(_nt); invs.resize(_nt); ta_index<I> index; for(int j=0;j<bit_info<I>::bits;j++){index.data[j] = no_index;} for(int i=0;i<_nt;i++){ const int * map = &general_basis_core<I,P>::maps[i*_N]; I inv = 0; for(int j=0;j<_N;j++){ int m = map[j]; int bit_j = _N - j - 1; if(m<0){ int bit_m = _N + m; index.data[bit_j] = bit_m; inv ^= ((I)1 << bit_j); } else{ int bit_m = _N - m -1; index.data[bit_j] = bit_m; } } gen_benes<I>(&benes_maps[i],index); invs[i] = inv; } } ~hcb_basis_core() {} npy_intp get_prefix(const I s,const int N_p){ return integer_cast<npy_intp,I>(s >> (general_basis_core<I,P>::N - N_p)); } I map_state(I s,int n_map,P &sign){ if(general_basis_core<I,P>::nt<=0){ return s; } return benes_bwd(&benes_maps[n_map],s^invs[n_map]); } void map_state(I s[],npy_intp M,int n_map,P sign[]){ if(general_basis_core<I,P>::nt<=0){ return; } const tr_benes<I> * benes_map = &benes_maps[n_map]; const I inv = invs[n_map]; #pragma omp for schedule(static) for(npy_intp i=0;i<M;i++){ s[i] = benes_bwd(benes_map,s[i]^inv); } } std::vector<int> 
count_particles(const I s){ std::vector<int> v(1); v[0] = bit_count(s,general_basis_core<I,P>::N); return v; } // I map_state(I s,int n_map,int &sign){ // if(general_basis_core<I,P>::nt<=0){ // return s; // } // const int n = general_basis_core<I,P>::N; // return hcb_map_bits(s,&general_basis_core<I,P>::maps[n_map*n],n); // } // void map_state(I s[],npy_intp M,int n_map,signed char sign[]){ // if(general_basis_core<I,P>::nt<=0){ // return; // } // const int n = general_basis_core<I,P>::N; // const int * map = &general_basis_core<I,P>::maps[n_map*n]; // #pragma omp for schedule(static,1) // for(npy_intp i=0;i<M;i++){ // s[i] = hcb_map_bits(s[i],map,n); // } // } I inline next_state_pcon(const I s,const I nns){ if(s==0){return s;} I t = (s | (s - 1)) + 1; return t | ((((t & (0-t)) / (s & (0-s))) >> 1) - 1); } int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){ const I s = r; const I one = 1; const int NN = general_basis_core<I,P>::N; for(int j=n_op-1;j>-1;j--){ const int ind = NN-indx[j]-1; const I b = (one << ind); const bool a = (bool)((r >> ind)&one); const char op = opstr[j]; switch(op){ case 'z': m *= (a?0.5:-0.5); break; case 'n': m *= (a?1:0); break; case 'x': r ^= b; m *= 0.5; break; case 'y': m *= (a?std::complex<double>(0,0.5):std::complex<double>(0,-0.5)); r ^= b; break; case '+': m *= (a?0:1); r ^= b; break; case '-': m *= (a?1:0); r ^= b; break; case 'I': break; default: return -1; } if(m.real()==0 && m.imag()==0){ r = s; break; } } return 0; } }; } #endif
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/cuda/vector_cudahost.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/threading.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include "score_updater.hpp" namespace LightGBM { using json11::Json; /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. 
Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! 
* \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequency of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! 
* \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! * \brief Set prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param scores used to update the predictions */ void SetPredictAt(int data_idx, double* scores) override; /*! 
* \brief Get number of prediction for one data
  * \param start_iteration Start index of the iteration to predict
  * \param num_iteration number of used iterations
  * \param is_pred_leaf True if predicting leaf index
  * \param is_pred_contrib True if predicting feature contribution
  * \return number of prediction
  */
  inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
    // Base case (raw/normal prediction): one value per class.
    int num_pred_in_one_row = num_class_;
    if (is_pred_leaf) {
      int max_iteration = GetCurrentIteration();
      // Clamp start_iteration into [0, max_iteration] before sizing the slice.
      start_iteration = std::max(start_iteration, 0);
      start_iteration = std::min(start_iteration, max_iteration);
      if (num_iteration > 0) {
        num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
      } else {
        // num_iteration <= 0 means "all remaining iterations from start_iteration".
        num_pred_in_one_row *= (max_iteration - start_iteration);
      }
    } else if (is_pred_contrib) {
      // NOTE(review): start_iteration/num_iteration are not applied in this
      // branch — contribution output is always one value per feature plus the
      // baseline, regardless of the iteration slice.
      num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);  // +1 for 0-based indexing, +1 for baseline
    }
    return num_pred_in_one_row;
  }

  // Dense-feature and map-feature prediction entry points. The "Raw" variants
  // skip the objective's output transformation; the ByMap variants take a
  // sparse {feature index -> value} map instead of a dense array.
  // (Naming note: earlyStop vs early_stop below is inconsistent in the
  // original declarations and is preserved here.)
  void PredictRaw(const double* features, double* output,
                  const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
                       const PredictionEarlyStopInstance* early_stop) const override;

  void Predict(const double* features, double* output,
               const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictByMap(const std::unordered_map<int, double>& features, double* output,
                    const PredictionEarlyStopInstance* early_stop) const override;

  void PredictLeafIndex(const double* features, double* output) const override;

  void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;

  void PredictContrib(const double* features, double* output) const override;

  void PredictContribByMap(const std::unordered_map<int, double>& features,
                           std::vector<std::unordered_map<int, double>>* output) const override;

  /*!
* \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration, int feature_importance_type) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, int feature_importance_type, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param feature_importance_type Type of feature importance, 0: split, 1: gain * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Calculate upper bound value * \return upper bound value */ double GetUpperBoundValue() const override; /*! * \brief Calculate lower bound value * \return lower bound value */ double GetLowerBoundValue() const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! 
* \brief Get number of classes
  * \return Number of classes
  */
  inline int NumberOfClasses() const override { return num_class_; }

  // Configure which slice of the ensemble subsequent Predict* calls use:
  // iterations [start_iteration_for_pred_,
  //             start_iteration_for_pred_ + num_iteration_for_pred_).
  inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // Clamp the requested start into [0, total iterations].
    start_iteration = std::max(start_iteration, 0);
    start_iteration = std::min(start_iteration, num_iteration_for_pred_);
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
    } else {
      // num_iteration <= 0 means "use everything from start_iteration on".
      num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
    }
    start_iteration_for_pred_ = start_iteration;
    if (is_pred_contrib) {
      // Contribution prediction needs each tree's max depth up to date;
      // recompute for all trees before predicting (presumably for SHAP path
      // bookkeeping — confirm in Tree::RecomputeMaxDepth).
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  // Read a single leaf's output value; indices are validated via CHECK.
  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  // Overwrite a single leaf's output value; indices are validated via CHECK.
  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
  * \brief Get Type name of this boosting object
  */
  const char* SubModelName() const override { return "tree"; }

  bool IsLinear() const override { return linear_tree_; }

 protected:
  // Hessian is constant iff the objective says so; with a custom (external)
  // objective there is no objective_function_, so assume non-constant.
  virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
    if (objective_function != nullptr) {
      return objective_function->IsConstantHessian();
    } else {
      return false;
    }
  }

  /*!
  * \brief Print eval result and check early stopping
  */
  virtual bool EvalAndCheckEarlyStopping();

  /*!
  * \brief reset config for bagging
  */
  void ResetBaggingConfig(const Config* config, bool is_change_dataset);

  /*!
* \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current iteration * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Only use first metric for early stopping */ bool es_first_metric_only_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! 
\brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; #ifdef USE_CUDA /*! \brief First order derivative of training data */ std::vector<score_t, CHAllocator<score_t>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, CHAllocator<score_t>> hessians_; #else /*! \brief First order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_; #endif /*! \brief Store the indices of in-bag data */ std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Start iteration of used model */ int start_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! 
\brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; const int bagging_rand_block_ = 1024; std::vector<Random> bagging_rands_; ParallelPartitionRunner<data_size_t, false> bagging_runner_; Json forced_splits_json_; bool linear_tree_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
6749.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose

// 3-D heat equation, Jacobi-style: alternate A->B and B->A sweeps over the
// interior of an n*n*n grid stored in fixed 200^3 arrays. Both spatial outer
// dimensions are tiled 16 wide and the outermost spatial loop of each sweep
// is parallelized with OpenMP.
// NOTE(review): the time loop is hard-coded to 1000 sweeps; the `tsteps`
// parameter is accepted but unused — presumably frozen by the autotuner.
void kernel_heat_3d(int tsteps, int n, double A[200 + 0][200 + 0][200 + 0], double B[200 + 0][200 + 0][200 + 0]) {
  int t12;
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 1; t2 <= 1000; t2 += 1) {
    // FIX: removed undeclared `t14` from the private() clauses — no such
    // variable exists in this function, so the original pragmas were
    // ill-formed and rejected when compiled with OpenMP enabled.
    #pragma omp parallel for private(t4,t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 16)
      for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 16)
          for (t10 = t8; t10 <= (t8 + 15 < n - 2 ? t8 + 15 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              // 7-point stencil: B = A + 0.125 * (second difference per axis)
              B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12];
    #pragma omp parallel for private(t4,t6,t8,t10,t12)
    for (t4 = 1; t4 <= n - 2; t4 += 16)
      for (t6 = t4; t6 <= (t4 + 15 < n - 2 ? t4 + 15 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 16)
          for (t10 = t8; t10 <= (t8 + 15 < n - 2 ? t8 + 15 : n - 2); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 1)
              A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12];
  }
}
HashFactory.c
/* Copyright (C) 1991-2012 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* We do support the IEC 559 math functionality, real and complex. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. 
If software is modified
 * to produce derivative works, such modified software should be clearly marked,
 * so as not to confuse it with the version available from LANL.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed
 * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * Under this license, it is required to include a reference to this work. We
 * request that each derivative work contain a reference to LANL Copyright
 * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly
 * measured. In addition, it is requested that a modifier is included as in
 * the following example:
 *
 * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003
 *
 * This is LANL Copyright Disclosure C14043/LA-CC-14-003
 */

/**
 * @file HashFactory.c
 * @author Peter Ahrens
 * @date Thu Jun 6 2013
 */
//

#ifdef _OPENMP
#include <omp.h>
#endif

#include "HashFactory.h"

#ifdef HAVE_OPENCL
#ifdef __APPLE_CC__
#include <OpenCL/OpenCL.h>
#else
#include <CL/cl.h>
#endif
#else
/* OpenCL is unavailable: provide no-op stand-ins for the small subset of the
 * OpenCL C API used by this file so the host-only build still compiles and
 * links. Retain/release stubs echo their handle argument; every other stub
 * reports success (0) and performs no work. */
typedef int cl_kernel;
#define CL_CONTEXT_DEVICES 0
#define CL_MEM_READ_WRITE 0
#define CL_TRUE 1
#define CL_MEM_READ_ONLY 0
#define CL_MEM_COPY_HOST_PTR 0
#define CL_MEM_WRITE_ONLY 0
#define CL_KERNEL_WORK_GROUP_SIZE 128
int clRetainContext(int context) {
  return context;
}
int clRetainCommandQueue(int command_queue) {
  return command_queue;
}
int clGetContextInfo(int context, int param, size_t size, void *value,
                     size_t * size_ret) {
  return 0;
}
int clReleaseContext(int context) {
  return context;
}
int clReleaseCommandQueue(int command_queue) {
  return command_queue;
}
int clReleaseProgram(int program) {
  return program;
}
int clRetainKernel(int kernel) {
  return kernel;
}
int clRetainProgram(int program) {
  return program;
}
cl_mem clCreateBuffer(int context, int flags, size_t size, void *value,
                      int *size_ret) {
  return 0;
}
int clEnqueueWriteBuffer(int command_queue, void *buffer, int blocking_write,
                         size_t offset, size_t cb, const void *ptr,
                         uint nevents, const int *wait_list, int *event) {
  return 0;
}
int clEnqueueReadBuffer(int command_queue, void *buffer, int blocking_write,
                        size_t offset, size_t cb, const void *ptr,
                        uint nevents, const int *wait_list, int *event) {
  return 0;
}
int clCreateKernel(int program, const char *kernel_name, int *errcode_ret) {
  return 0;
}
int clReleaseKernel(int kernel) {
  return kernel;
}
int clReleaseMemObject(void *memobj) {
  return 0;
}
int clSetKernelArg(int kernel, uint arg_index, size_t arg_size,
                   const void *arg_value) {
  return 0;
}
int clGetKernelWorkGroupInfo(int kernel, int device, int param_name,
                             size_t size, void *value, size_t * size_ret) {
  return 0;
}
int clEnqueueNDRangeKernel(int command_queue, int kernel, uint work_dim,
                           const size_t * offset, const size_t * size,
                           const size_t * local_size, uint nevents,
                           const int *wait_list, int *event) {
  return 0;
}
int clFinish(int command_queue) {
  return 0;
}
#endif

/* Number of trials used by the primality check elsewhere in this file —
 * presumably a probabilistic test; confirm at the use site. */
#define PRIME_NUM_CHECKS 20

#include <math.h>

/* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced
 * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National
 * Laboratory (LANL), which is operated by Los Alamos National Security, LLC
 * for the U.S. Department of Energy. The U.S. Government has rights to use,
 * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS
 * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
 * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.
If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. 
* * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ /* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. 
You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Under this license, it is required to include a reference to this work. We * request that each derivative work contain a reference to LANL Copyright * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly * measured. In addition, it is requested that a modifier is included as in * the following example: * * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003 * * This is LANL Copyright Disclosure C14043/LA-CC-14-003 */ static int reportLevel = 0; const char *HashFactory_source; const char *Hash_GetKernelSourceString() { return HashFactory_source; } size_t roundUpToNearest(size_t x, size_t r) { return (((x - 1) / r) + 1) * r; } int modularPow(int base, int exponent, int modulus) { int result = 1; while (exponent) { if (exponent & 1) result = ((long long int)result * base) % modulus; exponent >>= 1; base = ((long long int)base * base) % modulus; } return result; } int largestProthPrimeUnder(int N) { if (N < 4) { return N; } //determine the nearest proth number int n; int m; frexp((double)N, &n); n /= 2; int s = 1 << n; int p = s * ((N - 1) / s) + 1; int i; int a; srand(p); while (p > 3) { //check if a proth number is prime for (i = 0; i < PRIME_NUM_CHECKS; i++) { a = rand(); if (modularPow(a, (p - 1) / 2, p) == p - 1) { return p; } } //determine the next proth number if (p - 1 == s * s / 4) { s /= 2; } p -= s; } return 3; } int smallestProthPrimeAbove(int N) { if (N < 4) { return N; } //determine the nearest proth number int n; int m; frexp((double)N, &n); n /= 2; int s = 1 << n; int p = s * ((N - 1) / s) + 1; int 
i; int a; srand(p); while (1) { //determine the next proth number if (p - 1 == s * s) { s *= 2; } p += s; //check if a proth number is prime for (i = 0; i < PRIME_NUM_CHECKS; i++) { a = rand(); if (modularPow(a, (p - 1) / 2, p) == p - 1) { return p; } } } return 3; } int intLog2(int n) { int result = 0; while (n >>= 1) { result++; } return result; } void Hash_SetReportLevel(int level) { reportLevel = level; } int Hash_GetReportLevel() { return reportLevel; } char *Hash_ExitCodeString(int exitCode) { switch (exitCode) { case HASH_EXIT_CODE_NORMAL: return "Normal"; case HASH_EXIT_CODE_ERROR: return "Error"; case HASH_EXIT_CODE_OVERWRITE: return "Overwrite"; case HASH_EXIT_CODE_KEY_DNE: return "Key Does Not Exist"; case HASH_EXIT_CODE_CYCLE: return "Cycle"; case HASH_EXIT_CODE_MAX_ENTRIES_EXCEEDED: return "Maximum Number Of Entries Exceeded"; default: return "Unknown"; } } void Hash_ExitCodeDebug(int exitCode) { if (exitCode != HASH_EXIT_CODE_NORMAL) { printf("HashExitCode: %s\n", Hash_ExitCodeString(exitCode)); } } struct intintHash_Table_ { char *tableData; cl_mem tableDataBuffer; int (*destroyFunc) (intintHash_Table *); int (*setupFunc) (intintHash_Table *); int (*emptyFunc) (intintHash_Table *); int (*queryFunc) (intintHash_Table *, size_t, int *, int *); int (*querySingleFunc) (intintHash_Table *, int, int *); int (*insertFunc) (intintHash_Table *, size_t, int *, int *); int (*insertSingleFunc) (intintHash_Table *, int, int); int (*insertNoOverwriteFunc) (intintHash_Table *, size_t, int *, int *); int (*insertSingleNoOverwriteFunc) (intintHash_Table *, int, int); int (*bufferQueryFunc) (intintHash_Table *, size_t, cl_mem, cl_mem); int (*bufferInsertFunc) (intintHash_Table *, size_t, cl_mem, cl_mem); int (*bufferInsertNoOverwriteFunc) (intintHash_Table *, size_t, cl_mem, cl_mem); cl_context context; cl_command_queue queue; cl_program utilProgram; cl_kernel emptyKernel; size_t emptyKernelLocalWorkSize; cl_program program; cl_kernel querySingleKernel; cl_kernel 
insertSingleKernel; cl_kernel insertSingleNoOverwriteKernel; size_t localWorkSize; }; struct intintHash_Factory_ { cl_context context; cl_program program; cl_command_queue queue; int hashTypesAvailable; cl_program utilProgram[HASH_NUM_CL_HASHES]; cl_kernel emptyKernel[HASH_NUM_CL_HASHES]; size_t emptyKernelLocalWorkSize[HASH_NUM_CL_HASHES]; cl_kernel querySingleKernel[HASH_NUM_CL_HASHES]; cl_kernel insertSingleKernel[HASH_NUM_CL_HASHES]; cl_kernel insertSingleNoOverwriteKernel[HASH_NUM_CL_HASHES]; int emptyValue; size_t localWorkSize; intintHash_Table *(*createFunc[HASH_NUM_HASHES]) (intintHash_Factory *, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor); int (*destroyFunc[HASH_NUM_HASHES]) (intintHash_Factory *, int hashIndex); }; intintHash_Factory *intintHash_CreateFactory(int hashTypes, int *emptyValue, size_t localWorkSize, cl_context * context, cl_command_queue * queue) { if (hashTypes == 0) { hashTypes = HASH_ALL_C_HASHES; } if (!(hashTypes & HASH_ALL_HASHES)) { printf("Please specify a valid hash type to create.\n"); exit(1); } hashTypes &= HASH_ALL_HASHES; if ((hashTypes & HASH_SENTINEL_PERFECT_HASHES) == hashTypes && emptyValue == NULL) { printf ("emptyValue must be valid if a sentinel perfect hash is the only option available.\n"); exit(1); } intintHash_Factory *factory = (intintHash_Factory *) malloc(sizeof(intintHash_Factory)); if (emptyValue == NULL) { hashTypes &= !HASH_SENTINEL_PERFECT_HASHES; } else { factory->emptyValue = *emptyValue; } factory->hashTypesAvailable = hashTypes; if (hashTypes & HASH_ALL_CL_HASHES) { if (localWorkSize == 0) { factory->localWorkSize = HASH_DEFAULT_LOCAL_WORK_SIZE; } else { factory->localWorkSize = 1 << intLog2(localWorkSize); } if (context == NULL) { CLHash_Utilities_CreateContext(&factory->context, &factory->queue); } else { factory->context = *context; clRetainContext(*context); if (queue == NULL) { printf ("Please specify a command queue for your context.\n"); exit(-1); } factory->queue = *queue; 
clRetainCommandQueue(*queue); } cl_int error; cl_device_id device; error = clGetContextInfo(factory->context, CL_CONTEXT_DEVICES, sizeof(device), &device, NULL); CLHash_Utilities_HandleError(error, "intintHash_CreateFactory", "clGetContextInfo"); factory->program = CLHash_Utilities_BuildProgramString(factory->context, device, Hash_GetKernelSourceString ()); } int hashType = 1; for (int hashIndex = 0; hashIndex < HASH_NUM_HASHES; hashIndex++) { hashType = 1 << hashIndex; switch (hashType & hashTypes) { case IDENTITY_PERFECT_HASH_ID: intintIdentityPerfectHash_CreateFactory(factory, hashIndex); break; case IDENTITY_PERFECT_CL_HASH_ID: intintIdentityPerfectCLHash_CreateFactory(factory, hashIndex); break; case IDENTITY_PERFECT_OPENMP_HASH_ID: intintIdentityPerfectOpenMPHash_CreateFactory(factory, hashIndex); break; case IDENTITY_SENTINEL_PERFECT_HASH_ID: intintIdentitySentinelPerfectHash_CreateFactory(factory, hashIndex); break; case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID: intintIdentitySentinelPerfectCLHash_CreateFactory (factory, hashIndex); break; case IDENTITY_SENTINEL_PERFECT_OPENMP_HASH_ID: intintIdentitySentinelPerfectOpenMPHash_CreateFactory (factory, hashIndex); break; case LCG_LINEAR_OPEN_COMPACT_HASH_ID: intintLCGLinearOpenCompactHash_CreateFactory(factory, hashIndex); break; case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID: intintLCGLinearOpenCompactCLHash_CreateFactory(factory, hashIndex); break; case LCG_LINEAR_OPEN_COMPACT_OPENMP_HASH_ID: intintLCGLinearOpenCompactOpenMPHash_CreateFactory (factory, hashIndex); break; case LCG_QUADRATIC_OPEN_COMPACT_HASH_ID: intintLCGQuadraticOpenCompactHash_CreateFactory(factory, hashIndex); break; case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID: intintLCGQuadraticOpenCompactCLHash_CreateFactory (factory, hashIndex); break; case LCG_QUADRATIC_OPEN_COMPACT_OPENMP_HASH_ID: intintLCGQuadraticOpenCompactOpenMPHash_CreateFactory (factory, hashIndex); break; } } return factory; } int intintHash_DestroyFactory(intintHash_Factory * factory) { 
int hashType = 1; for (int hashIndex = 0; hashIndex < HASH_NUM_HASHES; hashIndex++) { hashType = 1 << hashIndex; switch (hashType & factory->hashTypesAvailable) { case IDENTITY_PERFECT_HASH_ID: intintIdentityPerfectHash_DestroyFactory(factory, hashIndex); break; case IDENTITY_PERFECT_CL_HASH_ID: intintIdentityPerfectCLHash_DestroyFactory(factory, hashIndex); break; case IDENTITY_PERFECT_OPENMP_HASH_ID: intintIdentityPerfectOpenMPHash_DestroyFactory(factory, hashIndex); break; case IDENTITY_SENTINEL_PERFECT_HASH_ID: intintIdentitySentinelPerfectHash_DestroyFactory (factory, hashIndex); break; case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID: intintIdentitySentinelPerfectCLHash_DestroyFactory (factory, hashIndex); break; case IDENTITY_SENTINEL_PERFECT_OPENMP_HASH_ID: intintIdentitySentinelPerfectOpenMPHash_DestroyFactory (factory, hashIndex); break; case LCG_LINEAR_OPEN_COMPACT_HASH_ID: intintLCGLinearOpenCompactHash_DestroyFactory(factory, hashIndex); break; case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID: intintLCGLinearOpenCompactCLHash_DestroyFactory(factory, hashIndex); break; case LCG_LINEAR_OPEN_COMPACT_OPENMP_HASH_ID: intintLCGLinearOpenCompactOpenMPHash_DestroyFactory (factory, hashIndex); break; case LCG_QUADRATIC_OPEN_COMPACT_HASH_ID: intintLCGQuadraticOpenCompactHash_DestroyFactory (factory, hashIndex); break; case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID: intintLCGQuadraticOpenCompactCLHash_DestroyFactory (factory, hashIndex); break; case LCG_QUADRATIC_OPEN_COMPACT_OPENMP_HASH_ID: intintLCGQuadraticOpenCompactOpenMPHash_DestroyFactory (factory, hashIndex); break; } hashIndex++; } if (factory->hashTypesAvailable & HASH_ALL_CL_HASHES) { clReleaseContext(factory->context); clReleaseCommandQueue(factory->queue); clReleaseProgram(factory->program); } free(factory); return (0); } intintHash_Table *intintHash_CreateTable(intintHash_Factory * factory, int hashTypes, size_t keyRange, size_t numEntries, float loadFactor) { if (loadFactor > 1.0 || loadFactor < HASH_MIN_LOAD_FACTOR) 
{ loadFactor = HASH_DEFAULT_LOAD_FACTOR; } if (hashTypes == 0) { hashTypes = factory->hashTypesAvailable; if ((hashTypes & HASH_ALL_CL_HASHES) && (hashTypes & HASH_ALL_OPENMP_HASHES) && (hashTypes & HASH_ALL_C_HASHES)) { hashTypes &= HASH_ALL_C_HASHES; } } if (!(hashTypes & factory->hashTypesAvailable)) { printf ("None of the selected hash types are supported by this factory.\n"); exit(1); } hashTypes &= factory->hashTypesAvailable; if ((hashTypes & HASH_ALL_CL_HASHES) && (hashTypes & HASH_ALL_OPENMP_HASHES) && (hashTypes & HASH_ALL_C_HASHES)) { printf("Please decide between OpenCL, OpenMP or C hash.\n"); exit(1); } if ((hashTypes & HASH_PERFECT_HASHES) == hashTypes && keyRange == 0) { printf ("keyRange must be set if a perfect hash is the only option available.\n"); exit(1); } if ((hashTypes & HASH_COMPACT_HASHES) == hashTypes && numEntries == 0) { printf ("numEntries must be set if a compact hash is the only option available.\n"); exit(1); } if (numEntries == 0 && keyRange == 0) { printf("either numEntries or keyRange must be set.\n"); exit(1); } size_t perfectNumBuckets = keyRange; size_t compactNumBuckets = (size_t) (numEntries / loadFactor); int hashIndex; if ((hashTypes & HASH_SENTINEL_PERFECT_HASHES) && ((hashTypes == (hashTypes & HASH_SENTINEL_PERFECT_HASHES)) || (compactNumBuckets == 0 || (perfectNumBuckets / compactNumBuckets < HASH_PERFECT_COMPACT_SWITCH_FACTOR)))) { hashIndex = intLog2(hashTypes & HASH_SENTINEL_PERFECT_HASHES); } else if ((hashTypes & HASH_NOSENTINEL_PERFECT_HASHES) && ((hashTypes == (hashTypes & HASH_PERFECT_HASHES)) || (compactNumBuckets == 0 || (perfectNumBuckets / compactNumBuckets < HASH_PERFECT_COMPACT_SWITCH_FACTOR)))) { hashIndex = intLog2(hashTypes & HASH_NOSENTINEL_PERFECT_HASHES); } else if ((hashTypes & HASH_LINEAR_COMPACT_HASHES) && ((hashTypes == (hashTypes & (HASH_PERFECT_HASHES | HASH_LINEAR_COMPACT_HASHES))) || ((hashTypes & HASH_COMPACT_HASHES & HASH_LINEAR_COMPACT_HASHES) == (hashTypes & HASH_COMPACT_HASHES) || 
loadFactor > 0.5))) { hashIndex = intLog2(hashTypes & HASH_LINEAR_COMPACT_HASHES); } else { hashIndex = intLog2(hashTypes & HASH_QUADRATIC_COMPACT_HASHES); } intintHash_Table *table = factory->createFunc[hashIndex] (factory, hashIndex, keyRange, numEntries, loadFactor); return table; } int intintHash_SetupTable(intintHash_Table * table) { table->setupFunc(table); return (0); } int intintHash_EmptyTable(intintHash_Table * table) { table->emptyFunc(table); return (0); } int intintHash_DestroyTable(intintHash_Table * table) { table->destroyFunc(table); return (0); } cl_mem intintHash_GetTableDataBuffer(intintHash_Table * table) { return table->tableDataBuffer; } cl_mem *intintHash_GetTableDataBufferPtr(intintHash_Table * table) { return &table->tableDataBuffer; } int intintHash_GetTableType(intintHash_Table * table) { return ((int *)table->tableData)[0]; } int intintHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) { table->queryFunc(table, numKeys, keys, valuesOutput); return (0); } int intintHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) { table->querySingleFunc(table, key, valueOutput); return (0); } int intintHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) { table->insertFunc(table, numEntries, keys, values); return (0); } int intintHash_InsertSingle(intintHash_Table * table, int key, int value) { table->insertSingleFunc(table, key, value); return (0); } int intintHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) { table->insertNoOverwriteFunc(table, numEntries, keys, values); return (0); } int intintHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) { table->insertSingleNoOverwriteFunc(table, key, value); return (0); } int intintHash_BufferQuery(intintHash_Table * table, size_t numKeys, cl_mem keys, cl_mem valuesOutput) { table->bufferQueryFunc(table, numKeys, keys, valuesOutput); return (0); } int 
intintHash_BufferInsert(intintHash_Table * table, size_t numEntries, cl_mem keys, cl_mem values) { table->bufferInsertFunc(table, numEntries, keys, values); return (0); } int intintHash_BufferInsertNoOverwrite(intintHash_Table * table, size_t numEntries, cl_mem keys, cl_mem values) { table->bufferInsertNoOverwriteFunc(table, numEntries, keys, values); return (0); } typedef struct intintIdentityPerfectHash_TableData { int hashID; unsigned int numBuckets; char compressFuncData; } intintIdentityPerfectHash_TableData; typedef struct intintIdentityPerfectHash_Bucket { int key; int value; } intintIdentityPerfectHash_Bucket; intintHash_Table *intintIdentityPerfectHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) { intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table)); table->destroyFunc = &intintIdentityPerfectHash_DestroyTable; table->setupFunc = &intintIdentityPerfectHash_SetupTable; table->emptyFunc = &intintIdentityPerfectHash_EmptyTable; table->queryFunc = &intintIdentityPerfectHash_Query; table->querySingleFunc = &intintIdentityPerfectHash_QuerySingle; table->insertFunc = &intintIdentityPerfectHash_Insert; table->insertSingleFunc = &intintIdentityPerfectHash_InsertSingle; table->insertNoOverwriteFunc = &intintIdentityPerfectHash_InsertNoOverwrite; table->insertSingleNoOverwriteFunc = &intintIdentityPerfectHash_InsertSingleNoOverwrite; table->tableData = (char *)malloc(sizeof(intintIdentityPerfectHash_TableData)); ((intintIdentityPerfectHash_TableData *) table->tableData)->hashID = IDENTITY_PERFECT_HASH_ID; ((intintIdentityPerfectHash_TableData *) table->tableData)->numBuckets = keyRange + 1; char *tempHashData = (char *)malloc(sizeof(intintIdentityPerfectHash_TableData) + ((intintIdentityPerfectHash_TableData *) table-> tableData)->numBuckets * sizeof(intintIdentityPerfectHash_Bucket)); memcpy(tempHashData, table->tableData, sizeof(intintIdentityPerfectHash_TableData)); 
free(table->tableData); table->tableData = tempHashData; return table; } int intintIdentityPerfectHash_CreateFactory(intintHash_Factory * factory, int hashIndex) { factory->createFunc[hashIndex] = &intintIdentityPerfectHash_CreateTable; factory->destroyFunc[hashIndex] = &intintIdentityPerfectHash_DestroyFactory;; return HASH_EXIT_CODE_NORMAL; } int intintIdentityPerfectHash_DestroyFactory(intintHash_Factory * factory, int hashIndex) {; return HASH_EXIT_CODE_NORMAL; } int intintIdentityPerfectHash_DestroyTable(intintHash_Table * table) { int exitCode = 0; free(table->tableData); free(table); return exitCode; } int intintIdentityPerfectHash_SetupTable(intintHash_Table * table) { int exitCode = 0; intintIdentityPerfectHash_Bucket *buckets = (intintIdentityPerfectHash_Bucket *) & table-> tableData[sizeof(intintIdentityPerfectHash_TableData)]; if (intintHash_GetTableType(table) & ~HASH_SENTINEL_PERFECT_HASHES) { for (int index = 0; index < ((intintIdentityPerfectHash_TableData *) table-> tableData)->numBuckets; index++) { buckets[index].key = HASH_BUCKET_STATUS_EMPTY; }} exitCode = HASH_EXIT_CODE_NORMAL; return exitCode; } int intintIdentityPerfectHash_EmptyTable(intintHash_Table * table) { int exitCode = 0; intintIdentityPerfectHash_Bucket *buckets = (intintIdentityPerfectHash_Bucket *) & table-> tableData[sizeof(intintIdentityPerfectHash_TableData)]; for (int index = 0; index < ((intintIdentityPerfectHash_TableData *) table->tableData)-> numBuckets; index++) { buckets[index].key = HASH_BUCKET_STATUS_EMPTY; } exitCode = HASH_EXIT_CODE_NORMAL; return exitCode; } int intintIdentityPerfectHash_InnerQuerySingle(char *tableData, int key, int *valueOutput) { intintIdentityPerfectHash_Bucket *buckets = (intintIdentityPerfectHash_Bucket *) & tableData[sizeof(intintIdentityPerfectHash_TableData)]; int index; int exitCode; index = intintHash_CompressIdentity(((intintIdentityPerfectHash_TableData *) tableData)->compressFuncData, key); if ((buckets[index].key) != 
HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: *valueOutput = buckets[index].value; return HASH_EXIT_CODE_NORMAL; case HASH_SEARCH_CODE_MISMATCH: case HASH_SEARCH_CODE_EMPTY: return HASH_EXIT_CODE_KEY_DNE; default: return exitCode; } } int intintIdentityPerfectHash_InnerQuery(char *tableData, unsigned int numKeys, int *keys, int *valuesOutput) { intintIdentityPerfectHash_Bucket *buckets = (intintIdentityPerfectHash_Bucket *) & tableData[sizeof(intintIdentityPerfectHash_TableData)]; int key; int *valueOutput; int index; int exitCode; uint i; int resultExitCode = HASH_EXIT_CODE_NORMAL; for (i = 0; i < numKeys; i++) { key = keys[i]; valueOutput = &valuesOutput[i]; index = intintHash_CompressIdentity(((intintIdentityPerfectHash_TableData *) tableData)->compressFuncData, key); if ((buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: *valueOutput = buckets[index].value; break; case HASH_SEARCH_CODE_MISMATCH: case HASH_SEARCH_CODE_EMPTY: resultExitCode = HASH_EXIT_CODE_KEY_DNE; break; default: return exitCode; } } return resultExitCode; } int intintIdentityPerfectHash_InnerInsertSingle(char *tableData, int key, int value) { intintIdentityPerfectHash_Bucket *buckets = (intintIdentityPerfectHash_Bucket *) & tableData[sizeof(intintIdentityPerfectHash_TableData)]; int index; int exitCode; index = intintHash_CompressIdentity(((intintIdentityPerfectHash_TableData *) tableData)->compressFuncData, key); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: buckets[index].value = value; return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintIdentityPerfectHash_InnerInsert(char *tableData, unsigned int numEntries, int *keys, int *values) { intintIdentityPerfectHash_Bucket *buckets = (intintIdentityPerfectHash_Bucket *) & tableData[sizeof(intintIdentityPerfectHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i;; for (i = 0; i < numEntries; i++) { key = keys[i]; index = intintHash_CompressIdentity(((intintIdentityPerfectHash_TableData *) tableData)->compressFuncData, key); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: resultExitCode = HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintIdentityPerfectHash_InnerInsertSingleNoOverwrite(char *tableData, int key, int value) { intintIdentityPerfectHash_Bucket *buckets = (intintIdentityPerfectHash_Bucket *) & tableData[sizeof(intintIdentityPerfectHash_TableData)]; int index; int exitCode; index = intintHash_CompressIdentity(((intintIdentityPerfectHash_TableData *) tableData)->compressFuncData, key); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? (buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintIdentityPerfectHash_InnerInsertNoOverwrite(char *tableData, unsigned int numEntries, int *keys, int *values) { intintIdentityPerfectHash_Bucket *buckets = (intintIdentityPerfectHash_Bucket *) & tableData[sizeof(intintIdentityPerfectHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i;; for (i = 0; i < numEntries; i++) { key = keys[i]; index = intintHash_CompressIdentity(((intintIdentityPerfectHash_TableData *) tableData)->compressFuncData, key); if (((buckets[index].key == 
HASH_BUCKET_STATUS_EMPTY) ? (buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: resultExitCode = HASH_EXIT_CODE_OVERWRITE; break; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintIdentityPerfectHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) { return intintIdentityPerfectHash_InnerQuerySingle(table->tableData, key, valueOutput); } int intintIdentityPerfectHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) { return intintIdentityPerfectHash_InnerQuery(table->tableData, numKeys, keys, valuesOutput); } int intintIdentityPerfectHash_InsertSingle(intintHash_Table * table, int key, int value) { return intintIdentityPerfectHash_InnerInsertSingle(table->tableData, key, value); } int intintIdentityPerfectHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) { return intintIdentityPerfectHash_InnerInsert(table->tableData, numEntries, keys, values); } int intintIdentityPerfectHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) { return intintIdentityPerfectHash_InnerInsertSingleNoOverwrite(table-> tableData, key, value); } int intintIdentityPerfectHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) { return intintIdentityPerfectHash_InnerInsertNoOverwrite(table-> tableData, numEntries, keys, values); } typedef struct intintIdentityPerfectCLHash_TableData { int hashID; unsigned int numBuckets; char compressFuncData; } intintIdentityPerfectCLHash_TableData; typedef struct intintIdentityPerfectCLHash_Bucket { int key; int value; } 
intintIdentityPerfectCLHash_Bucket; intintHash_Table *intintIdentityPerfectCLHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) { intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table)); table->destroyFunc = &intintIdentityPerfectCLHash_DestroyTable; table->setupFunc = &intintIdentityPerfectCLHash_SetupTable; table->emptyFunc = &intintIdentityPerfectCLHash_EmptyTable; table->queryFunc = &intintIdentityPerfectCLHash_Query; table->querySingleFunc = &intintIdentityPerfectCLHash_QuerySingle; table->insertFunc = &intintIdentityPerfectCLHash_Insert; table->insertSingleFunc = &intintIdentityPerfectCLHash_InsertSingle; table->insertNoOverwriteFunc = &intintIdentityPerfectCLHash_InsertNoOverwrite; table->insertSingleNoOverwriteFunc = &intintIdentityPerfectCLHash_InsertSingleNoOverwrite; table->tableData = (char *)malloc(sizeof(intintIdentityPerfectCLHash_TableData)); ((intintIdentityPerfectCLHash_TableData *) table->tableData)->hashID = IDENTITY_PERFECT_CL_HASH_ID; table->context = factory->context; table->queue = factory->queue; table->program = factory->program; table->localWorkSize = factory->localWorkSize; table->utilProgram = factory->utilProgram[hashIndex]; table->emptyKernel = factory->emptyKernel[hashIndex]; table->emptyKernelLocalWorkSize = factory->emptyKernelLocalWorkSize[hashIndex]; table->querySingleKernel = factory->querySingleKernel[hashIndex]; table->insertSingleKernel = factory->insertSingleKernel[hashIndex]; table->insertSingleNoOverwriteKernel = factory->insertSingleNoOverwriteKernel[hashIndex]; clRetainContext(table->context); clRetainCommandQueue(table->queue); clRetainProgram(table->program); clRetainProgram(table->utilProgram); clRetainKernel(table->emptyKernel); clRetainKernel(table->querySingleKernel); clRetainKernel(table->insertSingleKernel); clRetainKernel(table->insertSingleNoOverwriteKernel);; ((intintIdentityPerfectCLHash_TableData *) table->tableData)-> 
numBuckets = keyRange + 1; char *tempHashData = (char *)malloc(sizeof(intintIdentityPerfectCLHash_TableData) + ((intintIdentityPerfectCLHash_TableData *) table-> tableData)->numBuckets * sizeof(intintIdentityPerfectCLHash_Bucket)); memcpy(tempHashData, table->tableData, sizeof(intintIdentityPerfectCLHash_TableData)); free(table->tableData); table->tableData = tempHashData; cl_int err; table->tableDataBuffer = clCreateBuffer(table->context, CL_MEM_READ_WRITE, sizeof(intintIdentityPerfectHash_TableData) + ((intintIdentityPerfectHash_TableData *) table-> tableData)->numBuckets * sizeof(intintIdentityPerfectHash_Bucket), NULL, &err); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_InitTable", "clCreateBuffer"); err = clEnqueueWriteBuffer(table->queue, table->tableDataBuffer, CL_TRUE, 0, sizeof(intintIdentityPerfectHash_TableData), table->tableData, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_InitTable", "clEnqueueWriteBuffer"); return table; } int intintIdentityPerfectCLHash_CreateFactory(intintHash_Factory * factory, int hashIndex) { factory->createFunc[hashIndex] = &intintIdentityPerfectCLHash_CreateTable; factory->destroyFunc[hashIndex] = &intintIdentityPerfectCLHash_DestroyFactory; cl_int error; cl_device_id device; error = clGetContextInfo(factory->context, CL_CONTEXT_DEVICES, sizeof(device), &device, NULL); CLHash_Utilities_HandleError(error, "intintHash_CreateFactory", "clGetContextInfo"); factory->querySingleKernel[hashIndex] = clCreateKernel(factory->program, "intintIdentityPerfectCLHash_RangeQuerySingle", &error); CLHash_Utilities_HandleError(error, "intintIdentityPerfectCLHash_CreateFactory", "clCreateKernel"); factory->insertSingleKernel[hashIndex] = clCreateKernel(factory->program, "intintIdentityPerfectCLHash_RangeInsertSingle", &error); CLHash_Utilities_HandleError(error, "intintIdentityPerfectCLHash_CreateFactory", "clCreateKernel"); factory->insertSingleNoOverwriteKernel[hashIndex] = 
clCreateKernel(factory->program, "intintIdentityPerfectCLHash_RangeInsertSingleNoOverwrite", &error); CLHash_Utilities_HandleError(error, "intintIdentityPerfectCLHash_CreateFactory", "clCreateKernel"); factory->utilProgram[hashIndex] = CLHash_Utilities_BuildProgramString(factory->context, device, "static inline unsigned int intintHash_CompressIdentity(char data, int hashCode){ return hashCode; } typedef struct intintHash_CompressLCGData{ long unsigned int a; long unsigned int c; unsigned int m; unsigned int n; }intintHash_CompressLCGData; static inline unsigned int intintHash_CompressLCG(intintHash_CompressLCGData compressLCGData, int hashCode){ return ((compressLCGData.a * hashCode + compressLCGData.c) % compressLCGData.m) % compressLCGData.n; } typedef struct intintIdentityPerfectCLHash_TableData{ int hashID; unsigned int numBuckets; char compressFuncData; }intintIdentityPerfectCLHash_TableData; typedef struct intintIdentityPerfectCLHash_Bucket{ int key; int value; }intintIdentityPerfectCLHash_Bucket; __kernel void intintIdentityPerfectCLHash_Empty(__global char *tableData){ int index = get_global_id(0); if(index >= ((__global intintIdentityPerfectCLHash_TableData*)tableData)->numBuckets){ return; } __global intintIdentityPerfectCLHash_Bucket *buckets = (__global intintIdentityPerfectCLHash_Bucket*)&tableData[sizeof(intintIdentityPerfectCLHash_TableData)]; buckets[index].key = -1;/*HASH_BUCKET_STATUS_EMPTY*/ }"); factory->emptyKernel[hashIndex] = clCreateKernel(factory->utilProgram[hashIndex], "intintIdentityPerfectCLHash_Empty", &error); CLHash_Utilities_HandleError(error, "intintIdentityPerfectCLHash_CreateFactory", "clCreateKernel"); error = clGetKernelWorkGroupInfo(factory->emptyKernel[hashIndex], device, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &factory-> emptyKernelLocalWorkSize[hashIndex], NULL); CLHash_Utilities_HandleError(error, "intintIdentityPerfectCLHash_CreateFactory", "clGetKernelWorkGroupInfo");;; return HASH_EXIT_CODE_NORMAL; } int 
intintIdentityPerfectCLHash_DestroyFactory(intintHash_Factory * factory, int hashIndex) {; clReleaseKernel(factory->emptyKernel[hashIndex]); clReleaseProgram(factory->utilProgram[hashIndex]); clReleaseKernel(factory->querySingleKernel[hashIndex]); clReleaseKernel(factory->insertSingleKernel[hashIndex]); clReleaseKernel(factory->insertSingleNoOverwriteKernel[hashIndex]);; return HASH_EXIT_CODE_NORMAL; } int intintIdentityPerfectCLHash_DestroyTable(intintHash_Table * table) { int exitCode = 0; clReleaseMemObject(table->tableDataBuffer); clReleaseContext(table->context); clReleaseCommandQueue(table->queue); clReleaseProgram(table->utilProgram); clReleaseKernel(table->emptyKernel); clReleaseProgram(table->program); clReleaseKernel(table->querySingleKernel); clReleaseKernel(table->insertSingleKernel); clReleaseKernel(table->insertSingleNoOverwriteKernel); free(table->tableData); free(table); return exitCode; } int intintIdentityPerfectCLHash_SetupTable(intintHash_Table * table) { int exitCode = 0; cl_int err; err = clSetKernelArg(table->emptyKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_EmptyTable", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(((intintIdentityPerfectHash_TableData *) table-> tableData)->numBuckets, table->emptyKernelLocalWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->emptyKernel, 1, 0, &groupWorkSize, (const size_t *)&table-> emptyKernelLocalWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_EmptyTable", "clEnqueueNDRangeKernel"); exitCode = HASH_EXIT_CODE_NORMAL;; return exitCode; } int intintIdentityPerfectCLHash_EmptyTable(intintHash_Table * table) { int exitCode = 0; cl_int err; err = clSetKernelArg(table->emptyKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_EmptyTable", "clSetKernelArg"); const size_t groupWorkSize = 
roundUpToNearest(((intintIdentityPerfectHash_TableData *) table-> tableData)->numBuckets, table->emptyKernelLocalWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->emptyKernel, 1, 0, &groupWorkSize, (const size_t *)&table-> emptyKernelLocalWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_EmptyTable", "clEnqueueNDRangeKernel"); exitCode = HASH_EXIT_CODE_NORMAL;; return exitCode; } int intintIdentityPerfectCLHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) { return intintIdentityPerfectCLHash_Query(table, 1, &key, valueOutput); } int intintIdentityPerfectCLHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) { cl_int err; cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numKeys, keys, &err); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_Query", "clCreateBuffer"); cl_mem valuesOutputBuffer = clCreateBuffer(table->context, CL_MEM_WRITE_ONLY, sizeof(int) * numKeys, NULL, &err); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_Query", "clCreateBuffer"); intintIdentityPerfectCLHash_BufferQuery(table, numKeys, keysBuffer, valuesOutputBuffer); err = clEnqueueReadBuffer(table->queue, valuesOutputBuffer, CL_TRUE, 0, sizeof(int) * numKeys, valuesOutput, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_Query", "clEnqueueReadBuffer"); clReleaseMemObject(keysBuffer); clReleaseMemObject(valuesOutputBuffer); return HASH_EXIT_CODE_NORMAL; } int intintIdentityPerfectCLHash_BufferQuery(intintHash_Table * table, size_t numKeys, cl_mem keysBuffer, cl_mem valuesOutputBuffer) { cl_int err; err = clSetKernelArg(table->querySingleKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferQuery", "clSetKernelArg"); err = clSetKernelArg(table->querySingleKernel, 1, sizeof(unsigned int), &numKeys); 
CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferQuery", "clSetKernelArg"); err = clSetKernelArg(table->querySingleKernel, 2, sizeof(cl_mem), &keysBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferQuery", "clSetKernelArg"); err = clSetKernelArg(table->querySingleKernel, 3, sizeof(cl_mem), &valuesOutputBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferQuery", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numKeys, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->querySingleKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferQuery", "clEnqueueNDRangeKernel"); clFinish(table->queue); return HASH_EXIT_CODE_NORMAL; } int intintIdentityPerfectCLHash_InsertSingle(intintHash_Table * table, int key, int value) { return intintIdentityPerfectCLHash_Insert(table, 1, &key, &value); } int intintIdentityPerfectCLHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) { cl_int err; cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, keys, &err); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_Insert", "clCreateBuffer"); cl_mem valuesBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, values, &err); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_Insert", "clCreateBuffer"); intintIdentityPerfectCLHash_BufferInsert(table, numEntries, keysBuffer, valuesBuffer); clReleaseMemObject(keysBuffer); clReleaseMemObject(valuesBuffer); return HASH_EXIT_CODE_NORMAL; } int intintIdentityPerfectCLHash_BufferInsert(intintHash_Table * table, size_t numEntries, cl_mem keysBuffer, cl_mem valuesBuffer) { cl_int err; err = clSetKernelArg(table->insertSingleKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); 
CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsert", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleKernel, 1, sizeof(unsigned int), &numEntries); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsert", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleKernel, 2, sizeof(cl_mem), &keysBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsert", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleKernel, 3, sizeof(cl_mem), &valuesBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsert", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numEntries, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->insertSingleKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, NULL, "clEnqueueNDRangeKernel"); return (0); } int intintIdentityPerfectCLHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) { return intintIdentityPerfectCLHash_InsertNoOverwrite(table, 1, &key, &value); } int intintIdentityPerfectCLHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) { cl_int err; cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, keys, &err); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_InsertNoOverwrite", "clCreateBuffer"); cl_mem valuesBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, values, &err); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_InsertNoOverwrite", "clCreateBuffer"); intintIdentityPerfectCLHash_BufferInsertNoOverwrite(table, numEntries, keysBuffer, valuesBuffer); clReleaseMemObject(keysBuffer); clReleaseMemObject(valuesBuffer); return HASH_EXIT_CODE_NORMAL; } int 
intintIdentityPerfectCLHash_BufferInsertNoOverwrite(intintHash_Table * table, size_t numEntries, cl_mem keysBuffer, cl_mem valuesBuffer) { cl_int err; err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 1, sizeof(unsigned int), &numEntries); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsertNoOverwrite", "ClSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 2, sizeof(cl_mem), &keysBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 3, sizeof(cl_mem), &valuesBuffer); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numEntries, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->insertSingleNoOverwriteKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintIdentityPerfectCLHash_BufferInsertNoOverwrite", "clEnqueueNDRangeKernel"); return (0); } typedef struct intintIdentityPerfectOpenMPHash_TableData { int hashID; unsigned int numBuckets; char compressFuncData; } intintIdentityPerfectOpenMPHash_TableData; typedef struct intintIdentityPerfectOpenMPHash_Bucket { int key; int value; } intintIdentityPerfectOpenMPHash_Bucket; intintHash_Table *intintIdentityPerfectOpenMPHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) { intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table)); table->destroyFunc = &intintIdentityPerfectOpenMPHash_DestroyTable; table->setupFunc = &intintIdentityPerfectOpenMPHash_SetupTable; 
table->emptyFunc = &intintIdentityPerfectOpenMPHash_EmptyTable; table->queryFunc = &intintIdentityPerfectOpenMPHash_Query; table->querySingleFunc = &intintIdentityPerfectOpenMPHash_QuerySingle; table->insertFunc = &intintIdentityPerfectOpenMPHash_Insert; table->insertSingleFunc = &intintIdentityPerfectOpenMPHash_InsertSingle; table->insertNoOverwriteFunc = &intintIdentityPerfectOpenMPHash_InsertNoOverwrite; table->insertSingleNoOverwriteFunc = &intintIdentityPerfectOpenMPHash_InsertSingleNoOverwrite; table->tableData = (char *)malloc(sizeof(intintIdentityPerfectOpenMPHash_TableData)); ((intintIdentityPerfectOpenMPHash_TableData *) table->tableData)-> hashID = IDENTITY_PERFECT_OPENMP_HASH_ID; ((intintIdentityPerfectOpenMPHash_TableData *) table->tableData)-> numBuckets = keyRange + 1; char *tempHashData = (char *)malloc(sizeof(intintIdentityPerfectOpenMPHash_TableData) + ((intintIdentityPerfectOpenMPHash_TableData *) table->tableData)->numBuckets * sizeof(intintIdentityPerfectOpenMPHash_Bucket)); memcpy(tempHashData, table->tableData, sizeof(intintIdentityPerfectOpenMPHash_TableData)); free(table->tableData); table->tableData = tempHashData; return table; } int intintIdentityPerfectOpenMPHash_CreateFactory(intintHash_Factory * factory, int hashIndex) { factory->createFunc[hashIndex] = &intintIdentityPerfectOpenMPHash_CreateTable; factory->destroyFunc[hashIndex] = &intintIdentityPerfectOpenMPHash_DestroyFactory;; return HASH_EXIT_CODE_NORMAL; } int intintIdentityPerfectOpenMPHash_DestroyFactory(intintHash_Factory * factory, int hashIndex) {; return HASH_EXIT_CODE_NORMAL; } int intintIdentityPerfectOpenMPHash_DestroyTable(intintHash_Table * table) { int exitCode = 0; free(table->tableData); free(table); return exitCode; } int intintIdentityPerfectOpenMPHash_SetupTable(intintHash_Table * table) { int exitCode = 0; intintIdentityPerfectOpenMPHash_Bucket *buckets = (intintIdentityPerfectOpenMPHash_Bucket *) & table-> 
tableData[sizeof(intintIdentityPerfectOpenMPHash_TableData)]; if (intintHash_GetTableType(table) & ~HASH_SENTINEL_PERFECT_HASHES) { #pragma omp parallel for for (int index = 0; index < ((intintIdentityPerfectOpenMPHash_TableData *) table-> tableData)->numBuckets; index++) { buckets[index].key = HASH_BUCKET_STATUS_EMPTY; }} exitCode = HASH_EXIT_CODE_NORMAL; return exitCode; } int intintIdentityPerfectOpenMPHash_EmptyTable(intintHash_Table * table) { int exitCode = 0; intintIdentityPerfectOpenMPHash_Bucket *buckets = (intintIdentityPerfectOpenMPHash_Bucket *) & table-> tableData[sizeof(intintIdentityPerfectOpenMPHash_TableData)]; #pragma omp parallel for for (int index = 0; index < ((intintIdentityPerfectOpenMPHash_TableData *) table->tableData)-> numBuckets; index++) { buckets[index].key = HASH_BUCKET_STATUS_EMPTY; } exitCode = HASH_EXIT_CODE_NORMAL; return exitCode; } int intintIdentityPerfectOpenMPHash_InnerQuerySingle(char *tableData, int key, int *valueOutput) { intintIdentityPerfectOpenMPHash_Bucket *buckets = (intintIdentityPerfectOpenMPHash_Bucket *) & tableData[sizeof(intintIdentityPerfectOpenMPHash_TableData)]; int index; int exitCode; index = intintHash_CompressIdentity(((intintIdentityPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key); if ((buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: *valueOutput = buckets[index].value; return HASH_EXIT_CODE_NORMAL; case HASH_SEARCH_CODE_MISMATCH: case HASH_SEARCH_CODE_EMPTY: return HASH_EXIT_CODE_KEY_DNE; default: return exitCode; } } int intintIdentityPerfectOpenMPHash_InnerQuery(char *tableData, unsigned int numKeys, int *keys, int *valuesOutput) { intintIdentityPerfectOpenMPHash_Bucket *buckets = (intintIdentityPerfectOpenMPHash_Bucket *) & 
tableData[sizeof(intintIdentityPerfectOpenMPHash_TableData)]; int key; int *valueOutput; int index; int exitCode; uint i; int resultExitCode = HASH_EXIT_CODE_NORMAL; for (i = 0; i < numKeys; i++) { key = keys[i]; valueOutput = &valuesOutput[i]; index = intintHash_CompressIdentity(((intintIdentityPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key); if ((buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: *valueOutput = buckets[index].value; break; case HASH_SEARCH_CODE_MISMATCH: case HASH_SEARCH_CODE_EMPTY: resultExitCode = HASH_EXIT_CODE_KEY_DNE; break; default: return exitCode; } } return resultExitCode; } int intintIdentityPerfectOpenMPHash_InnerInsertSingle(char *tableData, int key, int value) { intintIdentityPerfectOpenMPHash_Bucket *buckets = (intintIdentityPerfectOpenMPHash_Bucket *) & tableData[sizeof(intintIdentityPerfectOpenMPHash_TableData)]; int index; int exitCode; index = intintHash_CompressIdentity(((intintIdentityPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: buckets[index].value = value; return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintIdentityPerfectOpenMPHash_InnerInsert(char *tableData, unsigned int numEntries, int *keys, int *values) { intintIdentityPerfectOpenMPHash_Bucket *buckets = (intintIdentityPerfectOpenMPHash_Bucket *) & tableData[sizeof(intintIdentityPerfectOpenMPHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i; #pragma omp parallel for for (i = 0; i < numEntries; i++) { key = keys[i]; index = intintHash_CompressIdentity(((intintIdentityPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: resultExitCode = HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintIdentityPerfectOpenMPHash_InnerInsertSingleNoOverwrite(char *tableData, int key, int value) { intintIdentityPerfectOpenMPHash_Bucket *buckets = (intintIdentityPerfectOpenMPHash_Bucket *) & tableData[sizeof(intintIdentityPerfectOpenMPHash_TableData)]; int index; int exitCode; index = intintHash_CompressIdentity(((intintIdentityPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? (buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) { if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_MISMATCH; } } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintIdentityPerfectOpenMPHash_InnerInsertNoOverwrite(char *tableData, unsigned int numEntries, int *keys, int *values) { intintIdentityPerfectOpenMPHash_Bucket *buckets = (intintIdentityPerfectOpenMPHash_Bucket *) & tableData[sizeof(intintIdentityPerfectOpenMPHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i; #pragma omp parallel for for (i = 0; i < numEntries; i++) { key = keys[i]; index = 
/* Tail of intintIdentityPerfectOpenMPHash_InnerInsertNoOverwrite: the function
 * header, locals and loop header live on the previous source line.  Per key,
 * claim the bucket if empty (atomically only in appearance -- this is a plain
 * conditional expression), then classify as MATCH / MISMATCH / EMPTY. */
intintHash_CompressIdentity(((intintIdentityPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key);
if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? (buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) {
    if (key == buckets[index].key) {
        exitCode = HASH_SEARCH_CODE_MATCH;
    } else {
        exitCode = HASH_SEARCH_CODE_MISMATCH;
    }
} else {
    exitCode = HASH_SEARCH_CODE_EMPTY;
}
switch (exitCode) {
case HASH_SEARCH_CODE_MATCH:
case HASH_SEARCH_CODE_MISMATCH:
    /* No-overwrite semantics: an occupied bucket is reported, not replaced. */
    resultExitCode = HASH_EXIT_CODE_OVERWRITE;
    break;
case HASH_SEARCH_CODE_EMPTY:
    buckets[index].value = values[i];
    break;
default:
    resultExitCode = exitCode;
}
}
return resultExitCode;
}

/* Public wrappers for the identity-perfect OpenMP table: each forwards to the
 * corresponding Inner* routine on the table's raw data blob. */
int intintIdentityPerfectOpenMPHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) {
    return intintIdentityPerfectOpenMPHash_InnerQuerySingle(table->tableData, key, valueOutput);
}
int intintIdentityPerfectOpenMPHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) {
    return intintIdentityPerfectOpenMPHash_InnerQuery(table->tableData, numKeys, keys, valuesOutput);
}
int intintIdentityPerfectOpenMPHash_InsertSingle(intintHash_Table * table, int key, int value) {
    return intintIdentityPerfectOpenMPHash_InnerInsertSingle(table->tableData, key, value);
}
int intintIdentityPerfectOpenMPHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) {
    return intintIdentityPerfectOpenMPHash_InnerInsert(table->tableData, numEntries, keys, values);
}
int intintIdentityPerfectOpenMPHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) {
    return intintIdentityPerfectOpenMPHash_InnerInsertSingleNoOverwrite(table->tableData, key, value);
}
int intintIdentityPerfectOpenMPHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) {
    return intintIdentityPerfectOpenMPHash_InnerInsertNoOverwrite(table->tableData, numEntries, keys, values);
}

/* Identity-sentinel-perfect hash (serial backend).  Header fields precede the
 * bucket array in the tableData blob; a bucket holds only a value, and a
 * bucket equal to emptyValue means "unoccupied" (the sentinel). */
typedef struct intintIdentitySentinelPerfectHash_TableData {
    int hashID;
    unsigned int numBuckets;
    char compressFuncData;
    int emptyValue;            /* sentinel marking an empty bucket */
} intintIdentitySentinelPerfectHash_TableData;

typedef struct intintIdentitySentinelPerfectHash_Bucket {
    int value;
} intintIdentitySentinelPerfectHash_Bucket;

/* Allocate a table, wire up the serial vtable, and size the data blob for
 * keyRange + 1 buckets (one bucket per possible key; loadFactor is unused
 * for perfect hashes). */
intintHash_Table *intintIdentitySentinelPerfectHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) {
    intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table));
    table->destroyFunc = &intintIdentitySentinelPerfectHash_DestroyTable;
    table->setupFunc = &intintIdentitySentinelPerfectHash_SetupTable;
    table->emptyFunc = &intintIdentitySentinelPerfectHash_EmptyTable;
    table->queryFunc = &intintIdentitySentinelPerfectHash_Query;
    table->querySingleFunc = &intintIdentitySentinelPerfectHash_QuerySingle;
    table->insertFunc = &intintIdentitySentinelPerfectHash_Insert;
    table->insertSingleFunc = &intintIdentitySentinelPerfectHash_InsertSingle;
    table->insertNoOverwriteFunc = &intintIdentitySentinelPerfectHash_InsertNoOverwrite;
    table->insertSingleNoOverwriteFunc = &intintIdentitySentinelPerfectHash_InsertSingleNoOverwrite;
    table->tableData = (char *)malloc(sizeof(intintIdentitySentinelPerfectHash_TableData));
    ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->hashID = IDENTITY_SENTINEL_PERFECT_HASH_ID;
    ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->emptyValue = factory->emptyValue;
    ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->numBuckets = keyRange + 1;
    /* Re-allocate at full size (header + buckets) and copy the header in. */
    char *tempHashData = (char *)malloc(sizeof(intintIdentitySentinelPerfectHash_TableData) + ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->numBuckets * sizeof(intintIdentitySentinelPerfectHash_Bucket));
    memcpy(tempHashData, table->tableData, sizeof(intintIdentitySentinelPerfectHash_TableData));
    free(table->tableData);
    table->tableData = tempHashData;
    return table;
}

/* Register this backend's create/destroy hooks in the factory slot. */
int intintIdentitySentinelPerfectHash_CreateFactory(intintHash_Factory * factory, int hashIndex) {
    factory->createFunc[hashIndex] = &intintIdentitySentinelPerfectHash_CreateTable;
    factory->destroyFunc[hashIndex] = &intintIdentitySentinelPerfectHash_DestroyFactory;;
    return HASH_EXIT_CODE_NORMAL;
}

/* Nothing factory-level to release for the serial backend. */
int intintIdentitySentinelPerfectHash_DestroyFactory(intintHash_Factory * factory, int hashIndex) {;
    return HASH_EXIT_CODE_NORMAL;
}

int intintIdentitySentinelPerfectHash_DestroyTable(intintHash_Table * table) {
    int exitCode = 0;
    free(table->tableData);
    free(table);
    return exitCode;
}

/* Initialize buckets to the sentinel.  NOTE(review): the guard tests
 * `type & ~HASH_SENTINEL_PERFECT_HASHES`, i.e. it clears only when the type
 * has bits OUTSIDE the sentinel-perfect set -- confirm this is intended. */
int intintIdentitySentinelPerfectHash_SetupTable(intintHash_Table * table) {
    int exitCode = 0;
    intintIdentitySentinelPerfectHash_Bucket *buckets = (intintIdentitySentinelPerfectHash_Bucket *) & table->tableData[sizeof(intintIdentitySentinelPerfectHash_TableData)];
    if (intintHash_GetTableType(table) & ~HASH_SENTINEL_PERFECT_HASHES) {
        for (int index = 0; index < ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->numBuckets; index++) {
            buckets[index].value = ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->emptyValue;
        }}
    exitCode = HASH_EXIT_CODE_NORMAL;
    return exitCode;
}

/* Unconditionally reset every bucket to the sentinel value. */
int intintIdentitySentinelPerfectHash_EmptyTable(intintHash_Table * table) {
    int exitCode = 0;
    intintIdentitySentinelPerfectHash_Bucket *buckets = (intintIdentitySentinelPerfectHash_Bucket *) & table->tableData[sizeof(intintIdentitySentinelPerfectHash_TableData)];
    for (int index = 0; index < ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->numBuckets; index++) {
        buckets[index].value = ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->emptyValue;
    }
    exitCode = HASH_EXIT_CODE_NORMAL;
    return exitCode;
}

/* Look up one key.  Returns HASH_EXIT_CODE_NORMAL and writes *valueOutput on
 * a hit, HASH_EXIT_CODE_KEY_DNE when the bucket holds the sentinel. */
int intintIdentitySentinelPerfectHash_InnerQuerySingle(char *tableData, int key, int *valueOutput) {
    intintIdentitySentinelPerfectHash_Bucket *buckets = (intintIdentitySentinelPerfectHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectHash_TableData)];
    int index;
    int exitCode;
    index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectHash_TableData *) tableData)->compressFuncData, key);
    if (buckets[index].value != ((intintIdentitySentinelPerfectHash_TableData *) tableData)->emptyValue) {
        exitCode = HASH_SEARCH_CODE_MATCH;
    } else {
        exitCode = HASH_SEARCH_CODE_EMPTY;
    }
    switch (exitCode) {
    case HASH_SEARCH_CODE_MATCH:
        *valueOutput = buckets[index].value;
        return HASH_EXIT_CODE_NORMAL;
    case HASH_SEARCH_CODE_MISMATCH:
    case HASH_SEARCH_CODE_EMPTY:
        return HASH_EXIT_CODE_KEY_DNE;
    default:
        return exitCode;
    }
}

/* Batch lookup; a single missing key downgrades the overall result to
 * HASH_EXIT_CODE_KEY_DNE but the loop still fills the remaining hits. */
int intintIdentitySentinelPerfectHash_InnerQuery(char *tableData, unsigned int numKeys, int *keys, int *valuesOutput) {
    intintIdentitySentinelPerfectHash_Bucket *buckets = (intintIdentitySentinelPerfectHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectHash_TableData)];
    int key;
    int *valueOutput;
    int index;
    int exitCode;
    uint i;
    int resultExitCode = HASH_EXIT_CODE_NORMAL;
    for (i = 0; i < numKeys; i++) {
        key = keys[i];
        valueOutput = &valuesOutput[i];
        index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectHash_TableData *) tableData)->compressFuncData, key);
        if (buckets[index].value != ((intintIdentitySentinelPerfectHash_TableData *) tableData)->emptyValue) {
            exitCode = HASH_SEARCH_CODE_MATCH;
        } else {
            exitCode = HASH_SEARCH_CODE_EMPTY;
        }
        switch (exitCode) {
        case HASH_SEARCH_CODE_MATCH:
            *valueOutput = buckets[index].value;
            break;
        case HASH_SEARCH_CODE_MISMATCH:
        case HASH_SEARCH_CODE_EMPTY:
            resultExitCode = HASH_EXIT_CODE_KEY_DNE;
            break;
        default:
            return exitCode;
        }
    }
    return resultExitCode;
}

/* Insert one key/value, overwriting an occupied bucket
 * (returns HASH_EXIT_CODE_OVERWRITE in that case). */
int intintIdentitySentinelPerfectHash_InnerInsertSingle(char *tableData, int key, int value) {
    intintIdentitySentinelPerfectHash_Bucket *buckets = (intintIdentitySentinelPerfectHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectHash_TableData)];
    int index;
    int exitCode;
    index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectHash_TableData *) tableData)->compressFuncData, key);
    if (buckets[index].value != ((intintIdentitySentinelPerfectHash_TableData *) tableData)->emptyValue) {
        exitCode = HASH_SEARCH_CODE_MATCH;
    } else {
        exitCode = HASH_SEARCH_CODE_EMPTY;
    }
    switch (exitCode) {
    case HASH_SEARCH_CODE_MATCH:
    case HASH_SEARCH_CODE_MISMATCH:
        buckets[index].value = value;
        return HASH_EXIT_CODE_OVERWRITE;
    case HASH_SEARCH_CODE_EMPTY:
        buckets[index].value = value;
        return HASH_EXIT_CODE_NORMAL;
    default:
        return exitCode;
    }
}

/* Batch insert with overwrite.  The missing `break` after the OVERWRITE
 * cases is intentional: occupied buckets fall through and are re-written. */
int intintIdentitySentinelPerfectHash_InnerInsert(char *tableData, unsigned int numEntries, int *keys, int *values) {
    intintIdentitySentinelPerfectHash_Bucket *buckets = (intintIdentitySentinelPerfectHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectHash_TableData)];
    int resultExitCode = HASH_EXIT_CODE_NORMAL;
    int key;
    int index;
    int exitCode;
    uint i;;
    for (i = 0; i < numEntries; i++) {
        key = keys[i];
        index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectHash_TableData *) tableData)->compressFuncData, key);
        if (buckets[index].value != ((intintIdentitySentinelPerfectHash_TableData *) tableData)->emptyValue) {
            exitCode = HASH_SEARCH_CODE_MATCH;
        } else {
            exitCode = HASH_SEARCH_CODE_EMPTY;
        }
        switch (exitCode) {
        case HASH_SEARCH_CODE_MATCH:
        case HASH_SEARCH_CODE_MISMATCH:
            resultExitCode = HASH_EXIT_CODE_OVERWRITE;
            /* fall through: overwrite the occupied bucket */
        case HASH_SEARCH_CODE_EMPTY:
            buckets[index].value = values[i];
            break;
        default:
            resultExitCode = exitCode;
        }
    }
    return resultExitCode;
}

/* Insert one key/value only if the bucket is empty. */
int intintIdentitySentinelPerfectHash_InnerInsertSingleNoOverwrite(char *tableData, int key, int value) {
    intintIdentitySentinelPerfectHash_Bucket *buckets = (intintIdentitySentinelPerfectHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectHash_TableData)];
    int index;
    int exitCode;
    index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectHash_TableData *) tableData)->compressFuncData, key);
    if (buckets[index].value != ((intintIdentitySentinelPerfectHash_TableData *) tableData)->emptyValue) {
        exitCode = HASH_SEARCH_CODE_MATCH;
    } else {
        exitCode = HASH_SEARCH_CODE_EMPTY;
    }
    switch (exitCode) {
    case HASH_SEARCH_CODE_MATCH:
    case HASH_SEARCH_CODE_MISMATCH:
        return HASH_EXIT_CODE_OVERWRITE;
    case HASH_SEARCH_CODE_EMPTY:
        buckets[index].value = value;
        return HASH_EXIT_CODE_NORMAL;
    default:
        return exitCode;
    }
}

/* Batch insert that skips occupied buckets (note the `break` after the
 * OVERWRITE result, unlike InnerInsert above). */
int intintIdentitySentinelPerfectHash_InnerInsertNoOverwrite(char *tableData, unsigned int numEntries, int *keys, int *values) {
    intintIdentitySentinelPerfectHash_Bucket *buckets = (intintIdentitySentinelPerfectHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectHash_TableData)];
    int resultExitCode = HASH_EXIT_CODE_NORMAL;
    int key;
    int index;
    int exitCode;
    uint i;;
    for (i = 0; i < numEntries; i++) {
        key = keys[i];
        index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectHash_TableData *) tableData)->compressFuncData, key);
        if (buckets[index].value != ((intintIdentitySentinelPerfectHash_TableData *) tableData)->emptyValue) {
            exitCode = HASH_SEARCH_CODE_MATCH;
        } else {
            exitCode = HASH_SEARCH_CODE_EMPTY;
        }
        switch (exitCode) {
        case HASH_SEARCH_CODE_MATCH:
        case HASH_SEARCH_CODE_MISMATCH:
            resultExitCode = HASH_EXIT_CODE_OVERWRITE;
            break;
        case HASH_SEARCH_CODE_EMPTY:
            buckets[index].value = values[i];
            break;
        default:
            resultExitCode = exitCode;
        }
    }
    return resultExitCode;
}

/* Public wrappers (serial backend).  NOTE(review): size_t counts narrow to
 * the Inner* routines' `unsigned int` parameters -- confirm counts fit. */
int intintIdentitySentinelPerfectHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) {
    return intintIdentitySentinelPerfectHash_InnerQuerySingle(table->tableData, key, valueOutput);
}
int intintIdentitySentinelPerfectHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) {
    return intintIdentitySentinelPerfectHash_InnerQuery(table->tableData, numKeys, keys, valuesOutput);
}
int intintIdentitySentinelPerfectHash_InsertSingle(intintHash_Table * table, int key, int value) {
    return intintIdentitySentinelPerfectHash_InnerInsertSingle(table->tableData, key, value);
}
int intintIdentitySentinelPerfectHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) {
    return intintIdentitySentinelPerfectHash_InnerInsert(table->tableData, numEntries, keys, values);
}
int intintIdentitySentinelPerfectHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) {
    return intintIdentitySentinelPerfectHash_InnerInsertSingleNoOverwrite(table->tableData, key, value);
}
int intintIdentitySentinelPerfectHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) {
    return intintIdentitySentinelPerfectHash_InnerInsertNoOverwrite(table->tableData, numEntries, keys, values);
}

/* Identity-sentinel-perfect hash, OpenCL backend: same on-host layout as the
 * serial variant. */
typedef struct intintIdentitySentinelPerfectCLHash_TableData {
    int hashID;
    unsigned int numBuckets;
    char compressFuncData;
    int emptyValue;
} intintIdentitySentinelPerfectCLHash_TableData;

typedef struct intintIdentitySentinelPerfectCLHash_Bucket {
    int value;
} intintIdentitySentinelPerfectCLHash_Bucket;

/* Create the OpenCL-backed table: vtable wiring and header allocation here;
 * CL object retention and device-buffer creation continue on the next
 * source line. */
intintHash_Table *intintIdentitySentinelPerfectCLHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) {
    intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table));
    table->destroyFunc = &intintIdentitySentinelPerfectCLHash_DestroyTable;
    table->setupFunc = &intintIdentitySentinelPerfectCLHash_SetupTable;
    table->emptyFunc = &intintIdentitySentinelPerfectCLHash_EmptyTable;
    table->queryFunc = &intintIdentitySentinelPerfectCLHash_Query;
    table->querySingleFunc = &intintIdentitySentinelPerfectCLHash_QuerySingle;
    table->insertFunc = &intintIdentitySentinelPerfectCLHash_Insert;
    table->insertSingleFunc = &intintIdentitySentinelPerfectCLHash_InsertSingle;
    table->insertNoOverwriteFunc = &intintIdentitySentinelPerfectCLHash_InsertNoOverwrite;
    table->insertSingleNoOverwriteFunc = &intintIdentitySentinelPerfectCLHash_InsertSingleNoOverwrite;
    table->tableData = (char *) malloc(sizeof(intintIdentitySentinelPerfectCLHash_TableData));
    /* Continuation of intintIdentitySentinelPerfectCLHash_CreateTable: copy
     * the CL handles out of the factory, retain them, then build the
     * full-size host blob and the matching device buffer. */
    ((intintIdentitySentinelPerfectCLHash_TableData *) table->tableData)->hashID = IDENTITY_SENTINEL_PERFECT_CL_HASH_ID;
    table->context = factory->context;
    table->queue = factory->queue;
    table->program = factory->program;
    table->localWorkSize = factory->localWorkSize;
    table->utilProgram = factory->utilProgram[hashIndex];
    table->emptyKernel = factory->emptyKernel[hashIndex];
    table->emptyKernelLocalWorkSize = factory->emptyKernelLocalWorkSize[hashIndex];
    table->querySingleKernel = factory->querySingleKernel[hashIndex];
    table->insertSingleKernel = factory->insertSingleKernel[hashIndex];
    table->insertSingleNoOverwriteKernel = factory->insertSingleNoOverwriteKernel[hashIndex];
    /* Retain shared CL objects so the table owns a reference independent of
     * the factory's lifetime. */
    clRetainContext(table->context);
    clRetainCommandQueue(table->queue);
    clRetainProgram(table->program);
    clRetainProgram(table->utilProgram);
    clRetainKernel(table->emptyKernel);
    clRetainKernel(table->querySingleKernel);
    clRetainKernel(table->insertSingleKernel);
    clRetainKernel(table->insertSingleNoOverwriteKernel);;
    ((intintIdentitySentinelPerfectCLHash_TableData *) table->tableData)->emptyValue = factory->emptyValue;
    ((intintIdentitySentinelPerfectCLHash_TableData *) table->tableData)->numBuckets = keyRange + 1;
    char *tempHashData = (char *) malloc(sizeof(intintIdentitySentinelPerfectCLHash_TableData) + ((intintIdentitySentinelPerfectCLHash_TableData *) table->tableData)->numBuckets * sizeof(intintIdentitySentinelPerfectCLHash_Bucket));
    memcpy(tempHashData, table->tableData, sizeof(intintIdentitySentinelPerfectCLHash_TableData));
    free(table->tableData);
    table->tableData = tempHashData;
    cl_int err;
    /* NOTE(review): buffer sizing uses the non-CL TableData/Bucket structs;
     * they appear layout-identical to the CL ones, but confirm. */
    table->tableDataBuffer = clCreateBuffer(table->context, CL_MEM_READ_WRITE, sizeof(intintIdentitySentinelPerfectHash_TableData) + ((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->numBuckets * sizeof(intintIdentitySentinelPerfectHash_Bucket), NULL, &err);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_InitTable", "clCreateBuffer");
    /* Blocking write of just the header; buckets are initialized on device
     * by the Empty kernel. */
    err = clEnqueueWriteBuffer(table->queue, table->tableDataBuffer, CL_TRUE, 0, sizeof(intintIdentitySentinelPerfectHash_TableData), table->tableData, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_InitTable", "clEnqueueWriteBuffer");
    return table;
}

/* Build the per-backend CL kernels and the embedded utility program (the
 * Empty kernel source is compiled from the string literal below). */
int intintIdentitySentinelPerfectCLHash_CreateFactory(intintHash_Factory * factory, int hashIndex) {
    factory->createFunc[hashIndex] = &intintIdentitySentinelPerfectCLHash_CreateTable;
    factory->destroyFunc[hashIndex] = &intintIdentitySentinelPerfectCLHash_DestroyFactory;
    cl_int error;
    cl_device_id device;
    error = clGetContextInfo(factory->context, CL_CONTEXT_DEVICES, sizeof(device), &device, NULL);
    CLHash_Utilities_HandleError(error, "intintHash_CreateFactory", "clGetContextInfo");
    factory->querySingleKernel[hashIndex] = clCreateKernel(factory->program, "intintIdentitySentinelPerfectCLHash_RangeQuerySingle", &error);
    CLHash_Utilities_HandleError(error, "intintIdentitySentinelPerfectCLHash_CreateFactory", "clCreateKernel");
    factory->insertSingleKernel[hashIndex] = clCreateKernel(factory->program, "intintIdentitySentinelPerfectCLHash_RangeInsertSingle", &error);
    CLHash_Utilities_HandleError(error, "intintIdentitySentinelPerfectCLHash_CreateFactory", "clCreateKernel");
    factory->insertSingleNoOverwriteKernel[hashIndex] = clCreateKernel(factory->program, "intintIdentitySentinelPerfectCLHash_RangeInsertSingleNoOverwrite", &error);
    CLHash_Utilities_HandleError(error, "intintIdentitySentinelPerfectCLHash_CreateFactory", "clCreateKernel");
    /* Device-side source for the Empty kernel (do not edit: compiled at
     * runtime by the OpenCL driver). */
    factory->utilProgram[hashIndex] = CLHash_Utilities_BuildProgramString(factory->context, device, "static inline unsigned int intintHash_CompressIdentity(char data, int hashCode){ return hashCode; } typedef struct intintHash_CompressLCGData{ long unsigned int a; long unsigned int c; unsigned int m; unsigned int n; }intintHash_CompressLCGData; static inline unsigned int intintHash_CompressLCG(intintHash_CompressLCGData compressLCGData, int hashCode){ return ((compressLCGData.a * hashCode + compressLCGData.c) % compressLCGData.m) % compressLCGData.n; } typedef struct intintIdentitySentinelPerfectCLHash_TableData{ int hashID; unsigned int numBuckets; char compressFuncData; int emptyValue; }intintIdentitySentinelPerfectCLHash_TableData; typedef struct intintIdentitySentinelPerfectCLHash_Bucket{ int value; }intintIdentitySentinelPerfectCLHash_Bucket; __kernel void intintIdentitySentinelPerfectCLHash_Empty(__global char *tableData){ int index = get_global_id(0); if(index >= ((__global intintIdentitySentinelPerfectCLHash_TableData*)tableData)->numBuckets){ return; } __global intintIdentitySentinelPerfectCLHash_Bucket *buckets = (__global intintIdentitySentinelPerfectCLHash_Bucket*)&tableData[sizeof(intintIdentitySentinelPerfectCLHash_TableData)]; buckets[index].value = ((__global intintIdentitySentinelPerfectCLHash_TableData*)tableData)->emptyValue; }");
    factory->emptyKernel[hashIndex] = clCreateKernel(factory->utilProgram[hashIndex], "intintIdentitySentinelPerfectCLHash_Empty", &error);
    CLHash_Utilities_HandleError(error, "intintIdentitySentinelPerfectCLHash_CreateFactory", "clCreateKernel");
    error = clGetKernelWorkGroupInfo(factory->emptyKernel[hashIndex], device, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &factory->emptyKernelLocalWorkSize[hashIndex], NULL);
    CLHash_Utilities_HandleError(error, "intintIdentitySentinelPerfectCLHash_CreateFactory", "clGetKernelWorkGroupInfo");;;
    return HASH_EXIT_CODE_NORMAL;
}

/* Release the factory-owned kernels/program for this backend slot. */
int intintIdentitySentinelPerfectCLHash_DestroyFactory(intintHash_Factory * factory, int hashIndex) {;
    clReleaseKernel(factory->emptyKernel[hashIndex]);
    clReleaseProgram(factory->utilProgram[hashIndex]);
    clReleaseKernel(factory->querySingleKernel[hashIndex]);
    clReleaseKernel(factory->insertSingleKernel[hashIndex]);
    clReleaseKernel(factory->insertSingleNoOverwriteKernel[hashIndex]);;
    return HASH_EXIT_CODE_NORMAL;
}

/* Release the table's retained CL references and free host memory. */
int intintIdentitySentinelPerfectCLHash_DestroyTable(intintHash_Table * table) {
    int exitCode = 0;
    clReleaseMemObject(table->tableDataBuffer);
    clReleaseContext(table->context);
    clReleaseCommandQueue(table->queue);
    clReleaseProgram(table->utilProgram);
    clReleaseKernel(table->emptyKernel);
    clReleaseProgram(table->program);
    clReleaseKernel(table->querySingleKernel);
    clReleaseKernel(table->insertSingleKernel);
    clReleaseKernel(table->insertSingleNoOverwriteKernel);
    free(table->tableData);
    free(table);
    return exitCode;
}

/* Run the Empty kernel over all buckets (identical body to EmptyTable;
 * presumably generated from the same template). */
int intintIdentitySentinelPerfectCLHash_SetupTable(intintHash_Table * table) {
    int exitCode = 0;
    cl_int err;
    err = clSetKernelArg(table->emptyKernel, 0, sizeof(cl_mem), &table->tableDataBuffer);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_EmptyTable", "clSetKernelArg");
    const size_t groupWorkSize = roundUpToNearest(((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->numBuckets, table->emptyKernelLocalWorkSize);
    err = clEnqueueNDRangeKernel(table->queue, table->emptyKernel, 1, 0, &groupWorkSize, (const size_t *)&table->emptyKernelLocalWorkSize, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_EmptyTable", "clEnqueueNDRangeKernel");
    exitCode = HASH_EXIT_CODE_NORMAL;;
    return exitCode;
}

int intintIdentitySentinelPerfectCLHash_EmptyTable(intintHash_Table * table) {
    int exitCode = 0;
    cl_int err;
    err = clSetKernelArg(table->emptyKernel, 0, sizeof(cl_mem), &table->tableDataBuffer);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_EmptyTable", "clSetKernelArg");
    const size_t groupWorkSize = roundUpToNearest(((intintIdentitySentinelPerfectHash_TableData *) table->tableData)->numBuckets, table->emptyKernelLocalWorkSize);
    err = clEnqueueNDRangeKernel(table->queue, table->emptyKernel, 1, 0, &groupWorkSize, (const size_t *)&table->emptyKernelLocalWorkSize, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_EmptyTable", "clEnqueueNDRangeKernel");
    exitCode = HASH_EXIT_CODE_NORMAL;;
    return exitCode;
}

int intintIdentitySentinelPerfectCLHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) {
    return intintIdentitySentinelPerfectCLHash_Query(table, 1, &key, valueOutput);
}

/* Host-side batch query: copy keys to the device, run BufferQuery, read the
 * values back (blocking). */
int intintIdentitySentinelPerfectCLHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) {
    cl_int err;
    cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numKeys, keys, &err);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_Query", "clCreateBuffer");
    cl_mem valuesOutputBuffer = clCreateBuffer(table->context, CL_MEM_WRITE_ONLY, sizeof(int) * numKeys, NULL, &err);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_Query", "clCreateBuffer");
    intintIdentitySentinelPerfectCLHash_BufferQuery(table, numKeys, keysBuffer, valuesOutputBuffer);
    err = clEnqueueReadBuffer(table->queue, valuesOutputBuffer, CL_TRUE, 0, sizeof(int) * numKeys, valuesOutput, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_Query", "clEnqueueReadBuffer");
    clReleaseMemObject(keysBuffer);
    clReleaseMemObject(valuesOutputBuffer);
    return HASH_EXIT_CODE_NORMAL;
}

/* Enqueue the query kernel over device-resident buffers.
 * NOTE(review): arg 1 passes &numKeys (size_t) with size
 * sizeof(unsigned int) -- only correct where the low bytes line up
 * (little-endian); confirm against the kernel signature. */
int intintIdentitySentinelPerfectCLHash_BufferQuery(intintHash_Table * table, size_t numKeys, cl_mem keysBuffer, cl_mem valuesOutputBuffer) {
    cl_int err;
    err = clSetKernelArg(table->querySingleKernel, 0, sizeof(cl_mem), &table->tableDataBuffer);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferQuery", "clSetKernelArg");
    err = clSetKernelArg(table->querySingleKernel, 1, sizeof(unsigned int), &numKeys);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferQuery", "clSetKernelArg");
    err = clSetKernelArg(table->querySingleKernel, 2, sizeof(cl_mem), &keysBuffer);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferQuery", "clSetKernelArg");
    err = clSetKernelArg(table->querySingleKernel, 3, sizeof(cl_mem), &valuesOutputBuffer);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferQuery", "clSetKernelArg");
    const size_t groupWorkSize = roundUpToNearest(numKeys, table->localWorkSize);
    err = clEnqueueNDRangeKernel(table->queue, table->querySingleKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferQuery", "clEnqueueNDRangeKernel");
    clFinish(table->queue);
    return HASH_EXIT_CODE_NORMAL;
}

int intintIdentitySentinelPerfectCLHash_InsertSingle(intintHash_Table * table, int key, int value) {
    return intintIdentitySentinelPerfectCLHash_Insert(table, 1, &key, &value);
}

/* Host-side batch insert: stage keys/values on the device and run
 * BufferInsert. */
int intintIdentitySentinelPerfectCLHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) {
    cl_int err;
    cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, keys, &err);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_Insert", "clCreateBuffer");
    cl_mem valuesBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, values, &err);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_Insert", "clCreateBuffer");
    intintIdentitySentinelPerfectCLHash_BufferInsert(table, numEntries, keysBuffer, valuesBuffer);
    clReleaseMemObject(keysBuffer);
    clReleaseMemObject(valuesBuffer);
    return HASH_EXIT_CODE_NORMAL;
}

/* Enqueue the insert kernel (same size_t/unsigned-int caveat as
 * BufferQuery applies to arg 1). */
int intintIdentitySentinelPerfectCLHash_BufferInsert(intintHash_Table * table, size_t numEntries, cl_mem keysBuffer, cl_mem valuesBuffer) {
    cl_int err;
    err = clSetKernelArg(table->insertSingleKernel, 0, sizeof(cl_mem), &table->tableDataBuffer);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsert", "clSetKernelArg");
    err = clSetKernelArg(table->insertSingleKernel, 1, sizeof(unsigned int), &numEntries);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsert", "clSetKernelArg");
    err = clSetKernelArg(table->insertSingleKernel, 2, sizeof(cl_mem), &keysBuffer);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsert", "clSetKernelArg");
    err = clSetKernelArg(table->insertSingleKernel, 3, sizeof(cl_mem), &valuesBuffer);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsert", "clSetKernelArg");
    const size_t groupWorkSize = roundUpToNearest(numEntries, table->localWorkSize);
    err = clEnqueueNDRangeKernel(table->queue, table->insertSingleKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err, NULL, "clEnqueueNDRangeKernel");
    return (0);
}

int intintIdentitySentinelPerfectCLHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) {
    return intintIdentitySentinelPerfectCLHash_InsertNoOverwrite(table, 1, &key, &value);
}

int intintIdentitySentinelPerfectCLHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) {
    cl_int err;
    cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, keys, &err);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_InsertNoOverwrite", "clCreateBuffer");
    cl_mem valuesBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, values, &err);
    CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_InsertNoOverwrite", "clCreateBuffer");
    intintIdentitySentinelPerfectCLHash_BufferInsertNoOverwrite(table, numEntries, keysBuffer, valuesBuffer);
    clReleaseMemObject(keysBuffer);
    clReleaseMemObject(valuesBuffer);
    return HASH_EXIT_CODE_NORMAL;
}

/* Head of BufferInsertNoOverwrite; the remaining kernel-arg setup and
 * enqueue continue on the next source line. */
int intintIdentitySentinelPerfectCLHash_BufferInsertNoOverwrite(intintHash_Table * table, size_t numEntries, cl_mem keysBuffer, cl_mem valuesBuffer) {
    cl_int err;
    err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 0, sizeof(cl_mem), &table->tableDataBuffer);
CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 1, sizeof(unsigned int), &numEntries); CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsertNoOverwrite", "ClSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 2, sizeof(cl_mem), &keysBuffer); CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 3, sizeof(cl_mem), &valuesBuffer); CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numEntries, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->insertSingleNoOverwriteKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintIdentitySentinelPerfectCLHash_BufferInsertNoOverwrite", "clEnqueueNDRangeKernel"); return (0); } typedef struct intintIdentitySentinelPerfectOpenMPHash_TableData { int hashID; unsigned int numBuckets; char compressFuncData; int emptyValue; } intintIdentitySentinelPerfectOpenMPHash_TableData; typedef struct intintIdentitySentinelPerfectOpenMPHash_Bucket { int value; } intintIdentitySentinelPerfectOpenMPHash_Bucket; intintHash_Table *intintIdentitySentinelPerfectOpenMPHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) { intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table)); table->destroyFunc = &intintIdentitySentinelPerfectOpenMPHash_DestroyTable; table->setupFunc = &intintIdentitySentinelPerfectOpenMPHash_SetupTable; table->emptyFunc = &intintIdentitySentinelPerfectOpenMPHash_EmptyTable; table->queryFunc = &intintIdentitySentinelPerfectOpenMPHash_Query; 
    /* Continuation of intintIdentitySentinelPerfectOpenMPHash_CreateTable:
     * finish the vtable, then allocate header + keyRange+1 buckets. */
    table->querySingleFunc = &intintIdentitySentinelPerfectOpenMPHash_QuerySingle;
    table->insertFunc = &intintIdentitySentinelPerfectOpenMPHash_Insert;
    table->insertSingleFunc = &intintIdentitySentinelPerfectOpenMPHash_InsertSingle;
    table->insertNoOverwriteFunc = &intintIdentitySentinelPerfectOpenMPHash_InsertNoOverwrite;
    table->insertSingleNoOverwriteFunc = &intintIdentitySentinelPerfectOpenMPHash_InsertSingleNoOverwrite;
    table->tableData = (char *) malloc(sizeof(intintIdentitySentinelPerfectOpenMPHash_TableData));
    ((intintIdentitySentinelPerfectOpenMPHash_TableData *) table->tableData)->hashID = IDENTITY_SENTINEL_PERFECT_OPENMP_HASH_ID;
    ((intintIdentitySentinelPerfectOpenMPHash_TableData *) table->tableData)->emptyValue = factory->emptyValue;
    ((intintIdentitySentinelPerfectOpenMPHash_TableData *) table->tableData)->numBuckets = keyRange + 1;
    /* Re-allocate at full size (header + buckets) and copy the header in. */
    char *tempHashData = (char *) malloc(sizeof(intintIdentitySentinelPerfectOpenMPHash_TableData) + ((intintIdentitySentinelPerfectOpenMPHash_TableData *) table->tableData)->numBuckets * sizeof(intintIdentitySentinelPerfectOpenMPHash_Bucket));
    memcpy(tempHashData, table->tableData, sizeof(intintIdentitySentinelPerfectOpenMPHash_TableData));
    free(table->tableData);
    table->tableData = tempHashData;
    return table;
}

/* Register this backend's create/destroy hooks in the factory slot. */
int intintIdentitySentinelPerfectOpenMPHash_CreateFactory(intintHash_Factory * factory, int hashIndex) {
    factory->createFunc[hashIndex] = &intintIdentitySentinelPerfectOpenMPHash_CreateTable;
    factory->destroyFunc[hashIndex] = &intintIdentitySentinelPerfectOpenMPHash_DestroyFactory;;
    return HASH_EXIT_CODE_NORMAL;
}

int intintIdentitySentinelPerfectOpenMPHash_DestroyFactory(intintHash_Factory * factory, int hashIndex) {;
    return HASH_EXIT_CODE_NORMAL;
}

int intintIdentitySentinelPerfectOpenMPHash_DestroyTable(intintHash_Table * table) {
    int exitCode = 0;
    free(table->tableData);
    free(table);
    return exitCode;
}

/* Parallel sentinel-fill of the bucket array.  Each iteration writes a
 * distinct bucket, so the omp loop is race-free here.  NOTE(review): the
 * guard is `type & ~HASH_SENTINEL_PERFECT_HASHES`, as in the serial
 * variant -- confirm the intended polarity. */
int intintIdentitySentinelPerfectOpenMPHash_SetupTable(intintHash_Table * table) {
    int exitCode = 0;
    intintIdentitySentinelPerfectOpenMPHash_Bucket *buckets = (intintIdentitySentinelPerfectOpenMPHash_Bucket *) & table->tableData[sizeof(intintIdentitySentinelPerfectOpenMPHash_TableData)];
    if (intintHash_GetTableType(table) & ~HASH_SENTINEL_PERFECT_HASHES) {
#pragma omp parallel for
        for (int index = 0; index < ((intintIdentitySentinelPerfectOpenMPHash_TableData *) table->tableData)->numBuckets; index++) {
            buckets[index].value = ((intintIdentitySentinelPerfectOpenMPHash_TableData *) table->tableData)->emptyValue;
        }}
    exitCode = HASH_EXIT_CODE_NORMAL;
    return exitCode;
}

/* Unconditional parallel reset of every bucket to the sentinel. */
int intintIdentitySentinelPerfectOpenMPHash_EmptyTable(intintHash_Table * table) {
    int exitCode = 0;
    intintIdentitySentinelPerfectOpenMPHash_Bucket *buckets = (intintIdentitySentinelPerfectOpenMPHash_Bucket *) & table->tableData[sizeof(intintIdentitySentinelPerfectOpenMPHash_TableData)];
#pragma omp parallel for
    for (int index = 0; index < ((intintIdentitySentinelPerfectOpenMPHash_TableData *) table->tableData)->numBuckets; index++) {
        buckets[index].value = ((intintIdentitySentinelPerfectOpenMPHash_TableData *) table->tableData)->emptyValue;
    }
    exitCode = HASH_EXIT_CODE_NORMAL;
    return exitCode;
}

/* Look up one key; KEY_DNE when the bucket holds the sentinel. */
int intintIdentitySentinelPerfectOpenMPHash_InnerQuerySingle(char *tableData, int key, int *valueOutput) {
    intintIdentitySentinelPerfectOpenMPHash_Bucket *buckets = (intintIdentitySentinelPerfectOpenMPHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectOpenMPHash_TableData)];
    int index;
    int exitCode;
    index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key);
    if (buckets[index].value != ((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->emptyValue) {
        exitCode = HASH_SEARCH_CODE_MATCH;
    } else {
        exitCode = HASH_SEARCH_CODE_EMPTY;
    }
    switch (exitCode) {
    case HASH_SEARCH_CODE_MATCH:
        *valueOutput = buckets[index].value;
        return HASH_EXIT_CODE_NORMAL;
    case HASH_SEARCH_CODE_MISMATCH:
    case HASH_SEARCH_CODE_EMPTY:
        return HASH_EXIT_CODE_KEY_DNE;
    default:
        return exitCode;
    }
}

/* Serial batch lookup (no omp pragma on this loop in the original). */
int intintIdentitySentinelPerfectOpenMPHash_InnerQuery(char *tableData, unsigned int numKeys, int *keys, int *valuesOutput) {
    intintIdentitySentinelPerfectOpenMPHash_Bucket *buckets = (intintIdentitySentinelPerfectOpenMPHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectOpenMPHash_TableData)];
    int key;
    int *valueOutput;
    int index;
    int exitCode;
    uint i;
    int resultExitCode = HASH_EXIT_CODE_NORMAL;
    for (i = 0; i < numKeys; i++) {
        key = keys[i];
        valueOutput = &valuesOutput[i];
        index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key);
        if (buckets[index].value != ((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->emptyValue) {
            exitCode = HASH_SEARCH_CODE_MATCH;
        } else {
            exitCode = HASH_SEARCH_CODE_EMPTY;
        }
        switch (exitCode) {
        case HASH_SEARCH_CODE_MATCH:
            *valueOutput = buckets[index].value;
            break;
        case HASH_SEARCH_CODE_MISMATCH:
        case HASH_SEARCH_CODE_EMPTY:
            resultExitCode = HASH_EXIT_CODE_KEY_DNE;
            break;
        default:
            return exitCode;
        }
    }
    return resultExitCode;
}

/* Insert one key/value, overwriting an occupied bucket. */
int intintIdentitySentinelPerfectOpenMPHash_InnerInsertSingle(char *tableData, int key, int value) {
    intintIdentitySentinelPerfectOpenMPHash_Bucket *buckets = (intintIdentitySentinelPerfectOpenMPHash_Bucket *) & tableData[sizeof(intintIdentitySentinelPerfectOpenMPHash_TableData)];
    int index;
    int exitCode;
    index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key);
    if (buckets[index].value != ((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->emptyValue) {
        exitCode = HASH_SEARCH_CODE_MATCH;
    } else {
        exitCode = HASH_SEARCH_CODE_EMPTY;
    }
    switch (exitCode) {
    case HASH_SEARCH_CODE_MATCH:
    case HASH_SEARCH_CODE_MISMATCH:
        buckets[index].value = value;
        return HASH_EXIT_CODE_OVERWRITE;
    case HASH_SEARCH_CODE_EMPTY:
        buckets[index].value = value;
        return HASH_EXIT_CODE_NORMAL;
    default:
        return
exitCode; } } int intintIdentitySentinelPerfectOpenMPHash_InnerInsert(char *tableData, unsigned int numEntries, int *keys, int *values) { intintIdentitySentinelPerfectOpenMPHash_Bucket *buckets = (intintIdentitySentinelPerfectOpenMPHash_Bucket *) & tableData[sizeof (intintIdentitySentinelPerfectOpenMPHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i; #pragma omp parallel for for (i = 0; i < numEntries; i++) { key = keys[i]; index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key); if (buckets[index].value != ((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->emptyValue) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: resultExitCode = HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintIdentitySentinelPerfectOpenMPHash_InnerInsertSingleNoOverwrite(char *tableData, int key, int value) { intintIdentitySentinelPerfectOpenMPHash_Bucket *buckets = (intintIdentitySentinelPerfectOpenMPHash_Bucket *) & tableData[sizeof (intintIdentitySentinelPerfectOpenMPHash_TableData)]; int index; int exitCode; index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key); if (buckets[index].value != ((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)-> emptyValue) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintIdentitySentinelPerfectOpenMPHash_InnerInsertNoOverwrite(char 
*tableData, unsigned int numEntries, int *keys, int *values) { intintIdentitySentinelPerfectOpenMPHash_Bucket *buckets = (intintIdentitySentinelPerfectOpenMPHash_Bucket *) & tableData[sizeof (intintIdentitySentinelPerfectOpenMPHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i; #pragma omp parallel for for (i = 0; i < numEntries; i++) { key = keys[i]; index = intintHash_CompressIdentity(((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->compressFuncData, key); if (buckets[index].value != ((intintIdentitySentinelPerfectOpenMPHash_TableData *) tableData)->emptyValue) { exitCode = HASH_SEARCH_CODE_MATCH; } else { exitCode = HASH_SEARCH_CODE_EMPTY; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: resultExitCode = HASH_EXIT_CODE_OVERWRITE; break; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintIdentitySentinelPerfectOpenMPHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) { return intintIdentitySentinelPerfectOpenMPHash_InnerQuerySingle(table-> tableData, key, valueOutput); } int intintIdentitySentinelPerfectOpenMPHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) { return intintIdentitySentinelPerfectOpenMPHash_InnerQuery(table-> tableData, numKeys, keys, valuesOutput); } int intintIdentitySentinelPerfectOpenMPHash_InsertSingle(intintHash_Table * table, int key, int value) { return intintIdentitySentinelPerfectOpenMPHash_InnerInsertSingle(table-> tableData, key, value); } int intintIdentitySentinelPerfectOpenMPHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) { return intintIdentitySentinelPerfectOpenMPHash_InnerInsert(table-> tableData, numEntries, keys, values); } int intintIdentitySentinelPerfectOpenMPHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) { return 
intintIdentitySentinelPerfectOpenMPHash_InnerInsertSingleNoOverwrite (table->tableData, key, value); } int intintIdentitySentinelPerfectOpenMPHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) { return intintIdentitySentinelPerfectOpenMPHash_InnerInsertNoOverwrite (table->tableData, numEntries, keys, values); } typedef struct intintLCGLinearOpenCompactHash_TableData { int hashID; unsigned int numBuckets; intintHash_CompressLCGData compressFuncData; } intintLCGLinearOpenCompactHash_TableData; typedef struct intintLCGLinearOpenCompactHash_Bucket { int key; int value; } intintLCGLinearOpenCompactHash_Bucket; intintHash_Table *intintLCGLinearOpenCompactHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) { intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table)); table->destroyFunc = &intintLCGLinearOpenCompactHash_DestroyTable; table->setupFunc = &intintLCGLinearOpenCompactHash_SetupTable; table->emptyFunc = &intintLCGLinearOpenCompactHash_EmptyTable; table->queryFunc = &intintLCGLinearOpenCompactHash_Query; table->querySingleFunc = &intintLCGLinearOpenCompactHash_QuerySingle; table->insertFunc = &intintLCGLinearOpenCompactHash_Insert; table->insertSingleFunc = &intintLCGLinearOpenCompactHash_InsertSingle; table->insertNoOverwriteFunc = &intintLCGLinearOpenCompactHash_InsertNoOverwrite; table->insertSingleNoOverwriteFunc = &intintLCGLinearOpenCompactHash_InsertSingleNoOverwrite; table->tableData = (char *)malloc(sizeof(intintLCGLinearOpenCompactHash_TableData)); ((intintLCGLinearOpenCompactHash_TableData *) table->tableData)-> hashID = LCG_LINEAR_OPEN_COMPACT_HASH_ID; ((intintLCGLinearOpenCompactHash_TableData *) table->tableData)-> numBuckets = (unsigned int)((double)numEntries / loadFactor); ((intintLCGLinearOpenCompactHash_TableData *) table->tableData)-> compressFuncData.a = HASH_LCG_A; ((intintLCGLinearOpenCompactHash_TableData *) 
table->tableData)-> compressFuncData.c = HASH_LCG_C; ((intintLCGLinearOpenCompactHash_TableData *) table->tableData)-> compressFuncData.m = HASH_LCG_M; ((intintLCGLinearOpenCompactHash_TableData *) table->tableData)-> compressFuncData.n = ((intintLCGLinearOpenCompactHash_TableData *) table->tableData)-> numBuckets; char *tempHashData = (char *)malloc(sizeof(intintLCGLinearOpenCompactHash_TableData) + ((intintLCGLinearOpenCompactHash_TableData *) table-> tableData)->numBuckets * sizeof(intintLCGLinearOpenCompactHash_Bucket)); memcpy(tempHashData, table->tableData, sizeof(intintLCGLinearOpenCompactHash_TableData)); free(table->tableData); table->tableData = tempHashData; return table; } int intintLCGLinearOpenCompactHash_CreateFactory(intintHash_Factory * factory, int hashIndex) { factory->createFunc[hashIndex] = &intintLCGLinearOpenCompactHash_CreateTable; factory->destroyFunc[hashIndex] = &intintLCGLinearOpenCompactHash_DestroyFactory;; return HASH_EXIT_CODE_NORMAL; } int intintLCGLinearOpenCompactHash_DestroyFactory(intintHash_Factory * factory, int hashIndex) {; return HASH_EXIT_CODE_NORMAL; } int intintLCGLinearOpenCompactHash_DestroyTable(intintHash_Table * table) { int exitCode = 0; free(table->tableData); free(table); return exitCode; } int intintLCGLinearOpenCompactHash_SetupTable(intintHash_Table * table) { int exitCode = 0; intintLCGLinearOpenCompactHash_Bucket *buckets = (intintLCGLinearOpenCompactHash_Bucket *) & table-> tableData[sizeof(intintLCGLinearOpenCompactHash_TableData)]; if (intintHash_GetTableType(table) & ~HASH_SENTINEL_PERFECT_HASHES) { for (int index = 0; index < ((intintLCGLinearOpenCompactHash_TableData *) table-> tableData)->numBuckets; index++) { buckets[index].key = HASH_BUCKET_STATUS_EMPTY; }} exitCode = HASH_EXIT_CODE_NORMAL; return exitCode; } int intintLCGLinearOpenCompactHash_EmptyTable(intintHash_Table * table) { int exitCode = 0; intintLCGLinearOpenCompactHash_Bucket *buckets = (intintLCGLinearOpenCompactHash_Bucket *) & table-> 
tableData[sizeof(intintLCGLinearOpenCompactHash_TableData)]; for (int index = 0; index < ((intintLCGLinearOpenCompactHash_TableData *) table->tableData)-> numBuckets; index++) { buckets[index].key = HASH_BUCKET_STATUS_EMPTY; } exitCode = HASH_EXIT_CODE_NORMAL; return exitCode; } int intintLCGLinearOpenCompactHash_InnerQuerySingle(char *tableData, int key, int *valueOutput) { intintLCGLinearOpenCompactHash_Bucket *buckets = (intintLCGLinearOpenCompactHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactHash_TableData)]; int index; int exitCode; intintLCGLinearOpenCompactHash_TableData *mytableData = (intintLCGLinearOpenCompactHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactHash_TableData *) tableData)-> numBuckets); if ((buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: *valueOutput = buckets[index].value; return HASH_EXIT_CODE_NORMAL; case HASH_SEARCH_CODE_MISMATCH: case HASH_SEARCH_CODE_EMPTY: return HASH_EXIT_CODE_KEY_DNE; default: return exitCode; } } int intintLCGLinearOpenCompactHash_InnerQuery(char *tableData, unsigned int numKeys, int *keys, int *valuesOutput) { intintLCGLinearOpenCompactHash_Bucket *buckets = (intintLCGLinearOpenCompactHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactHash_TableData)]; int key; int *valueOutput; int index; int exitCode; uint i; int resultExitCode = HASH_EXIT_CODE_NORMAL; for (i = 0; i < numKeys; i++) { key = keys[i]; valueOutput = &valuesOutput[i]; intintLCGLinearOpenCompactHash_TableData *mytableData = 
(intintLCGLinearOpenCompactHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactHash_TableData *) tableData)->numBuckets); if ((buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: *valueOutput = buckets[index].value; break; case HASH_SEARCH_CODE_MISMATCH: case HASH_SEARCH_CODE_EMPTY: resultExitCode = HASH_EXIT_CODE_KEY_DNE; break; default: return exitCode; } } return resultExitCode; } int intintLCGLinearOpenCompactHash_InnerInsertSingle(char *tableData, int key, int value) { intintLCGLinearOpenCompactHash_Bucket *buckets = (intintLCGLinearOpenCompactHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactHash_TableData)]; int index; int exitCode; intintLCGLinearOpenCompactHash_TableData *mytableData = (intintLCGLinearOpenCompactHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactHash_TableData *) tableData)-> numBuckets); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: buckets[index].value = value; return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintLCGLinearOpenCompactHash_InnerInsert(char *tableData, unsigned int numEntries, int *keys, int *values) { intintLCGLinearOpenCompactHash_Bucket *buckets = (intintLCGLinearOpenCompactHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i;; for (i = 0; i < numEntries; i++) { key = keys[i]; intintLCGLinearOpenCompactHash_TableData *mytableData = (intintLCGLinearOpenCompactHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactHash_TableData *) tableData)->numBuckets); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: resultExitCode = HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintLCGLinearOpenCompactHash_InnerInsertSingleNoOverwrite(char *tableData, int key, int value) { intintLCGLinearOpenCompactHash_Bucket *buckets = (intintLCGLinearOpenCompactHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactHash_TableData)]; int index; int exitCode; intintLCGLinearOpenCompactHash_TableData *mytableData = (intintLCGLinearOpenCompactHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactHash_TableData *) tableData)-> numBuckets); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintLCGLinearOpenCompactHash_InnerInsertNoOverwrite(char *tableData, unsigned int numEntries, int *keys, int *values) { intintLCGLinearOpenCompactHash_Bucket *buckets = (intintLCGLinearOpenCompactHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i;; for (i = 0; i < numEntries; i++) { key = keys[i]; intintLCGLinearOpenCompactHash_TableData *mytableData = (intintLCGLinearOpenCompactHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactHash_TableData *) tableData)->numBuckets); if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) : buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (key == buckets[index].key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: resultExitCode = HASH_EXIT_CODE_OVERWRITE; break; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintLCGLinearOpenCompactHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) { return intintLCGLinearOpenCompactHash_InnerQuerySingle(table->tableData, key, valueOutput); } int intintLCGLinearOpenCompactHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) { return intintLCGLinearOpenCompactHash_InnerQuery(table->tableData, numKeys, keys, valuesOutput); } int intintLCGLinearOpenCompactHash_InsertSingle(intintHash_Table * table, int key, int value) { return intintLCGLinearOpenCompactHash_InnerInsertSingle(table-> tableData, key, value); } int intintLCGLinearOpenCompactHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) { return intintLCGLinearOpenCompactHash_InnerInsert(table->tableData, numEntries, keys, values); } int intintLCGLinearOpenCompactHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) { return intintLCGLinearOpenCompactHash_InnerInsertSingleNoOverwrite(table-> tableData, key, value); } int intintLCGLinearOpenCompactHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) { return intintLCGLinearOpenCompactHash_InnerInsertNoOverwrite(table-> tableData, numEntries, keys, values); } typedef struct intintLCGLinearOpenCompactCLHash_TableData { int hashID; unsigned int numBuckets; intintHash_CompressLCGData compressFuncData; } 
intintLCGLinearOpenCompactCLHash_TableData; typedef struct intintLCGLinearOpenCompactCLHash_Bucket { int key; int value; } intintLCGLinearOpenCompactCLHash_Bucket; intintHash_Table *intintLCGLinearOpenCompactCLHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) { intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table)); table->destroyFunc = &intintLCGLinearOpenCompactCLHash_DestroyTable; table->setupFunc = &intintLCGLinearOpenCompactCLHash_SetupTable; table->emptyFunc = &intintLCGLinearOpenCompactCLHash_EmptyTable; table->queryFunc = &intintLCGLinearOpenCompactCLHash_Query; table->querySingleFunc = &intintLCGLinearOpenCompactCLHash_QuerySingle; table->insertFunc = &intintLCGLinearOpenCompactCLHash_Insert; table->insertSingleFunc = &intintLCGLinearOpenCompactCLHash_InsertSingle; table->insertNoOverwriteFunc = &intintLCGLinearOpenCompactCLHash_InsertNoOverwrite; table->insertSingleNoOverwriteFunc = &intintLCGLinearOpenCompactCLHash_InsertSingleNoOverwrite; table->tableData = (char *)malloc(sizeof(intintLCGLinearOpenCompactCLHash_TableData)); ((intintLCGLinearOpenCompactCLHash_TableData *) table->tableData)-> hashID = LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID; table->context = factory->context; table->queue = factory->queue; table->program = factory->program; table->localWorkSize = factory->localWorkSize; table->utilProgram = factory->utilProgram[hashIndex]; table->emptyKernel = factory->emptyKernel[hashIndex]; table->emptyKernelLocalWorkSize = factory->emptyKernelLocalWorkSize[hashIndex]; table->querySingleKernel = factory->querySingleKernel[hashIndex]; table->insertSingleKernel = factory->insertSingleKernel[hashIndex]; table->insertSingleNoOverwriteKernel = factory->insertSingleNoOverwriteKernel[hashIndex]; clRetainContext(table->context); clRetainCommandQueue(table->queue); clRetainProgram(table->program); clRetainProgram(table->utilProgram); clRetainKernel(table->emptyKernel); 
clRetainKernel(table->querySingleKernel); clRetainKernel(table->insertSingleKernel); clRetainKernel(table->insertSingleNoOverwriteKernel);; ((intintLCGLinearOpenCompactCLHash_TableData *) table->tableData)-> numBuckets = (unsigned int)((double)numEntries / loadFactor); ((intintLCGLinearOpenCompactCLHash_TableData *) table->tableData)-> compressFuncData.a = HASH_LCG_A; ((intintLCGLinearOpenCompactCLHash_TableData *) table->tableData)-> compressFuncData.c = HASH_LCG_C; ((intintLCGLinearOpenCompactCLHash_TableData *) table->tableData)-> compressFuncData.m = HASH_LCG_M; ((intintLCGLinearOpenCompactCLHash_TableData *) table->tableData)-> compressFuncData.n = ((intintLCGLinearOpenCompactCLHash_TableData *) table->tableData)-> numBuckets; char *tempHashData = (char *)malloc(sizeof(intintLCGLinearOpenCompactCLHash_TableData) + ((intintLCGLinearOpenCompactCLHash_TableData *) table->tableData)->numBuckets * sizeof(intintLCGLinearOpenCompactCLHash_Bucket)); memcpy(tempHashData, table->tableData, sizeof(intintLCGLinearOpenCompactCLHash_TableData)); free(table->tableData); table->tableData = tempHashData; cl_int err; table->tableDataBuffer = clCreateBuffer(table->context, CL_MEM_READ_WRITE, sizeof(intintLCGLinearOpenCompactHash_TableData) + ((intintLCGLinearOpenCompactHash_TableData *) table-> tableData)->numBuckets * sizeof(intintLCGLinearOpenCompactHash_Bucket), NULL, &err); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_InitTable", "clCreateBuffer"); err = clEnqueueWriteBuffer(table->queue, table->tableDataBuffer, CL_TRUE, 0, sizeof (intintLCGLinearOpenCompactHash_TableData), table->tableData, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_InitTable", "clEnqueueWriteBuffer"); return table; } int intintLCGLinearOpenCompactCLHash_CreateFactory(intintHash_Factory * factory, int hashIndex) { factory->createFunc[hashIndex] = &intintLCGLinearOpenCompactCLHash_CreateTable; factory->destroyFunc[hashIndex] = 
&intintLCGLinearOpenCompactCLHash_DestroyFactory; cl_int error; cl_device_id device; error = clGetContextInfo(factory->context, CL_CONTEXT_DEVICES, sizeof(device), &device, NULL); CLHash_Utilities_HandleError(error, "intintHash_CreateFactory", "clGetContextInfo"); factory->querySingleKernel[hashIndex] = clCreateKernel(factory->program, "intintLCGLinearOpenCompactCLHash_RangeQuerySingle", &error); CLHash_Utilities_HandleError(error, "intintLCGLinearOpenCompactCLHash_CreateFactory", "clCreateKernel"); factory->insertSingleKernel[hashIndex] = clCreateKernel(factory->program, "intintLCGLinearOpenCompactCLHash_RangeInsertSingle", &error); CLHash_Utilities_HandleError(error, "intintLCGLinearOpenCompactCLHash_CreateFactory", "clCreateKernel"); factory->insertSingleNoOverwriteKernel[hashIndex] = clCreateKernel(factory->program, "intintLCGLinearOpenCompactCLHash_RangeInsertSingleNoOverwrite", &error); CLHash_Utilities_HandleError(error, "intintLCGLinearOpenCompactCLHash_CreateFactory", "clCreateKernel"); factory->utilProgram[hashIndex] = CLHash_Utilities_BuildProgramString(factory->context, device, "static inline unsigned int intintHash_CompressIdentity(char data, int hashCode){ return hashCode; } typedef struct intintHash_CompressLCGData{ long unsigned int a; long unsigned int c; unsigned int m; unsigned int n; }intintHash_CompressLCGData; static inline unsigned int intintHash_CompressLCG(intintHash_CompressLCGData compressLCGData, int hashCode){ return ((compressLCGData.a * hashCode + compressLCGData.c) % compressLCGData.m) % compressLCGData.n; } typedef struct intintLCGLinearOpenCompactCLHash_TableData{ int hashID; unsigned int numBuckets; intintHash_CompressLCGData compressFuncData; }intintLCGLinearOpenCompactCLHash_TableData; typedef struct intintLCGLinearOpenCompactCLHash_Bucket{ int key; int value; }intintLCGLinearOpenCompactCLHash_Bucket; __kernel void intintLCGLinearOpenCompactCLHash_Empty(__global char *tableData){ int index = get_global_id(0); if(index >= 
((__global intintLCGLinearOpenCompactCLHash_TableData*)tableData)->numBuckets){ return; } __global intintLCGLinearOpenCompactCLHash_Bucket *buckets = (__global intintLCGLinearOpenCompactCLHash_Bucket*)&tableData[sizeof(intintLCGLinearOpenCompactCLHash_TableData)]; buckets[index].key = -1;/*HASH_BUCKET_STATUS_EMPTY*/ }"); factory->emptyKernel[hashIndex] = clCreateKernel(factory->utilProgram[hashIndex], "intintLCGLinearOpenCompactCLHash_Empty", &error); CLHash_Utilities_HandleError(error, "intintLCGLinearOpenCompactCLHash_CreateFactory", "clCreateKernel"); error = clGetKernelWorkGroupInfo(factory->emptyKernel[hashIndex], device, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &factory-> emptyKernelLocalWorkSize[hashIndex], NULL); CLHash_Utilities_HandleError(error, "intintLCGLinearOpenCompactCLHash_CreateFactory", "clGetKernelWorkGroupInfo");;; return HASH_EXIT_CODE_NORMAL; } int intintLCGLinearOpenCompactCLHash_DestroyFactory(intintHash_Factory * factory, int hashIndex) {; clReleaseKernel(factory->emptyKernel[hashIndex]); clReleaseProgram(factory->utilProgram[hashIndex]); clReleaseKernel(factory->querySingleKernel[hashIndex]); clReleaseKernel(factory->insertSingleKernel[hashIndex]); clReleaseKernel(factory->insertSingleNoOverwriteKernel[hashIndex]);; return HASH_EXIT_CODE_NORMAL; } int intintLCGLinearOpenCompactCLHash_DestroyTable(intintHash_Table * table) { int exitCode = 0; clReleaseMemObject(table->tableDataBuffer); clReleaseContext(table->context); clReleaseCommandQueue(table->queue); clReleaseProgram(table->utilProgram); clReleaseKernel(table->emptyKernel); clReleaseProgram(table->program); clReleaseKernel(table->querySingleKernel); clReleaseKernel(table->insertSingleKernel); clReleaseKernel(table->insertSingleNoOverwriteKernel); free(table->tableData); free(table); return exitCode; } int intintLCGLinearOpenCompactCLHash_SetupTable(intintHash_Table * table) { int exitCode = 0; cl_int err; err = clSetKernelArg(table->emptyKernel, 0, sizeof(cl_mem), 
&table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_EmptyTable", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(((intintLCGLinearOpenCompactHash_TableData *) table->tableData)->numBuckets, table->emptyKernelLocalWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->emptyKernel, 1, 0, &groupWorkSize, (const size_t *)&table-> emptyKernelLocalWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_EmptyTable", "clEnqueueNDRangeKernel"); exitCode = HASH_EXIT_CODE_NORMAL;; return exitCode; } int intintLCGLinearOpenCompactCLHash_EmptyTable(intintHash_Table * table) { int exitCode = 0; cl_int err; err = clSetKernelArg(table->emptyKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_EmptyTable", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(((intintLCGLinearOpenCompactHash_TableData *) table->tableData)->numBuckets, table->emptyKernelLocalWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->emptyKernel, 1, 0, &groupWorkSize, (const size_t *)&table-> emptyKernelLocalWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_EmptyTable", "clEnqueueNDRangeKernel"); exitCode = HASH_EXIT_CODE_NORMAL;; return exitCode; } int intintLCGLinearOpenCompactCLHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) { return intintLCGLinearOpenCompactCLHash_Query(table, 1, &key, valueOutput); } int intintLCGLinearOpenCompactCLHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) { cl_int err; cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numKeys, keys, &err); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_Query", "clCreateBuffer"); cl_mem valuesOutputBuffer = clCreateBuffer(table->context, CL_MEM_WRITE_ONLY, sizeof(int) * numKeys, 
NULL, &err); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_Query", "clCreateBuffer"); intintLCGLinearOpenCompactCLHash_BufferQuery(table, numKeys, keysBuffer, valuesOutputBuffer); err = clEnqueueReadBuffer(table->queue, valuesOutputBuffer, CL_TRUE, 0, sizeof(int) * numKeys, valuesOutput, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_Query", "clEnqueueReadBuffer"); clReleaseMemObject(keysBuffer); clReleaseMemObject(valuesOutputBuffer); return HASH_EXIT_CODE_NORMAL; } int intintLCGLinearOpenCompactCLHash_BufferQuery(intintHash_Table * table, size_t numKeys, cl_mem keysBuffer, cl_mem valuesOutputBuffer) { cl_int err; err = clSetKernelArg(table->querySingleKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferQuery", "clSetKernelArg"); err = clSetKernelArg(table->querySingleKernel, 1, sizeof(unsigned int), &numKeys); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferQuery", "clSetKernelArg"); err = clSetKernelArg(table->querySingleKernel, 2, sizeof(cl_mem), &keysBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferQuery", "clSetKernelArg"); err = clSetKernelArg(table->querySingleKernel, 3, sizeof(cl_mem), &valuesOutputBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferQuery", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numKeys, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->querySingleKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferQuery", "clEnqueueNDRangeKernel"); clFinish(table->queue); return HASH_EXIT_CODE_NORMAL; } int intintLCGLinearOpenCompactCLHash_InsertSingle(intintHash_Table * table, int key, int value) { return intintLCGLinearOpenCompactCLHash_Insert(table, 1, &key, &value); } int 
intintLCGLinearOpenCompactCLHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) { cl_int err; cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, keys, &err); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_Insert", "clCreateBuffer"); cl_mem valuesBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, values, &err); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_Insert", "clCreateBuffer"); intintLCGLinearOpenCompactCLHash_BufferInsert(table, numEntries, keysBuffer, valuesBuffer); clReleaseMemObject(keysBuffer); clReleaseMemObject(valuesBuffer); return HASH_EXIT_CODE_NORMAL; } int intintLCGLinearOpenCompactCLHash_BufferInsert(intintHash_Table * table, size_t numEntries, cl_mem keysBuffer, cl_mem valuesBuffer) { cl_int err; err = clSetKernelArg(table->insertSingleKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsert", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleKernel, 1, sizeof(unsigned int), &numEntries); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsert", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleKernel, 2, sizeof(cl_mem), &keysBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsert", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleKernel, 3, sizeof(cl_mem), &valuesBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsert", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numEntries, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->insertSingleKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, NULL, "clEnqueueNDRangeKernel"); return (0); } int 
intintLCGLinearOpenCompactCLHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) { return intintLCGLinearOpenCompactCLHash_InsertNoOverwrite(table, 1, &key, &value); } int intintLCGLinearOpenCompactCLHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) { cl_int err; cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, keys, &err); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_InsertNoOverwrite", "clCreateBuffer"); cl_mem valuesBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, values, &err); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_InsertNoOverwrite", "clCreateBuffer"); intintLCGLinearOpenCompactCLHash_BufferInsertNoOverwrite(table, numEntries, keysBuffer, valuesBuffer); clReleaseMemObject(keysBuffer); clReleaseMemObject(valuesBuffer); return HASH_EXIT_CODE_NORMAL; } int intintLCGLinearOpenCompactCLHash_BufferInsertNoOverwrite(intintHash_Table * table, size_t numEntries, cl_mem keysBuffer, cl_mem valuesBuffer) { cl_int err; err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 1, sizeof(unsigned int), &numEntries); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsertNoOverwrite", "ClSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 2, sizeof(cl_mem), &keysBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 3, sizeof(cl_mem), &valuesBuffer); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsertNoOverwrite", 
"clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numEntries, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->insertSingleNoOverwriteKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintLCGLinearOpenCompactCLHash_BufferInsertNoOverwrite", "clEnqueueNDRangeKernel"); return (0); } typedef struct intintLCGLinearOpenCompactOpenMPHash_TableData { int hashID; unsigned int numBuckets; intintHash_CompressLCGData compressFuncData; } intintLCGLinearOpenCompactOpenMPHash_TableData; typedef struct intintLCGLinearOpenCompactOpenMPHash_Bucket { int key; int value; } intintLCGLinearOpenCompactOpenMPHash_Bucket; intintHash_Table *intintLCGLinearOpenCompactOpenMPHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) { intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table)); table->destroyFunc = &intintLCGLinearOpenCompactOpenMPHash_DestroyTable; table->setupFunc = &intintLCGLinearOpenCompactOpenMPHash_SetupTable; table->emptyFunc = &intintLCGLinearOpenCompactOpenMPHash_EmptyTable; table->queryFunc = &intintLCGLinearOpenCompactOpenMPHash_Query; table->querySingleFunc = &intintLCGLinearOpenCompactOpenMPHash_QuerySingle; table->insertFunc = &intintLCGLinearOpenCompactOpenMPHash_Insert; table->insertSingleFunc = &intintLCGLinearOpenCompactOpenMPHash_InsertSingle; table->insertNoOverwriteFunc = &intintLCGLinearOpenCompactOpenMPHash_InsertNoOverwrite; table->insertSingleNoOverwriteFunc = &intintLCGLinearOpenCompactOpenMPHash_InsertSingleNoOverwrite; table->tableData = (char *) malloc(sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)); ((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->tableData)-> hashID = LCG_LINEAR_OPEN_COMPACT_OPENMP_HASH_ID; ((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->tableData)-> numBuckets = (unsigned int)((double)numEntries / loadFactor); 
	/* ---- tail of intintLCGLinearOpenCompactOpenMPHash_CreateTable ----
	   Seed the LCG compress-function parameters from the library-wide
	   HASH_LCG_* constants; n is tied to the bucket count (the modulus). */
	((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->tableData)->
	    compressFuncData.a = HASH_LCG_A;
	((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->tableData)->
	    compressFuncData.c = HASH_LCG_C;
	((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->tableData)->
	    compressFuncData.m = HASH_LCG_M;
	((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->tableData)->
	    compressFuncData.n =
	    ((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->
	     tableData)->numBuckets;
	/* Re-allocate so the header is immediately followed by the bucket
	   array in one contiguous block, then copy the header across. */
	char *tempHashData =
	    (char *)
	    malloc(sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData) +
		   ((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->
		    tableData)->numBuckets *
		   sizeof(intintLCGLinearOpenCompactOpenMPHash_Bucket));
	memcpy(tempHashData, table->tableData,
	       sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData));
	free(table->tableData);
	table->tableData = tempHashData;
	return table;
}

/* Register this variant's create/destroy hooks in the factory slot. */
int intintLCGLinearOpenCompactOpenMPHash_CreateFactory(intintHash_Factory *
						       factory, int hashIndex) {
	factory->createFunc[hashIndex] =
	    &intintLCGLinearOpenCompactOpenMPHash_CreateTable;
	factory->destroyFunc[hashIndex] =
	    &intintLCGLinearOpenCompactOpenMPHash_DestroyFactory;;
	return HASH_EXIT_CODE_NORMAL;
}

/* No per-factory state for this variant; kept for interface symmetry. */
int intintLCGLinearOpenCompactOpenMPHash_DestroyFactory(intintHash_Factory *
							factory,
							int hashIndex) {;
	return HASH_EXIT_CODE_NORMAL;
}

/* Free the table payload and the table struct itself. */
int intintLCGLinearOpenCompactOpenMPHash_DestroyTable(intintHash_Table * table) {
	int exitCode = 0;
	free(table->tableData);
	free(table);
	return exitCode;
}

/* One-time setup: mark every bucket empty, in parallel. Skipped for
   perfect-hash table types, which never probe empty slots. */
int intintLCGLinearOpenCompactOpenMPHash_SetupTable(intintHash_Table * table) {
	int exitCode = 0;
	/* Bucket array lives directly after the TableData header. */
	intintLCGLinearOpenCompactOpenMPHash_Bucket *buckets =
	    (intintLCGLinearOpenCompactOpenMPHash_Bucket *) & table->
	    tableData[sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)];
	if (intintHash_GetTableType(table) & ~HASH_SENTINEL_PERFECT_HASHES) {
		/* NOTE(review): signed `index` vs unsigned numBuckets —
		   fine while numBuckets <= INT_MAX. */
#pragma omp parallel for
		for (int index = 0;
		     index <
		     ((intintLCGLinearOpenCompactOpenMPHash_TableData *)
		      table->tableData)->numBuckets; index++) {
			buckets[index].key = HASH_BUCKET_STATUS_EMPTY;
		}}
	exitCode = HASH_EXIT_CODE_NORMAL;
	return exitCode;
}

/* Reset every bucket to empty, in parallel (unconditional version of
   SetupTable). */
int intintLCGLinearOpenCompactOpenMPHash_EmptyTable(intintHash_Table * table) {
	int exitCode = 0;
	intintLCGLinearOpenCompactOpenMPHash_Bucket *buckets =
	    (intintLCGLinearOpenCompactOpenMPHash_Bucket *) & table->
	    tableData[sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)];
#pragma omp parallel for
	for (int index = 0;
	     index <
	     ((intintLCGLinearOpenCompactOpenMPHash_TableData *) table->
	      tableData)->numBuckets; index++) {
		buckets[index].key = HASH_BUCKET_STATUS_EMPTY;
	}
	exitCode = HASH_EXIT_CODE_NORMAL;
	return exitCode;
}

/* Look up one key via linear probing from the LCG-compressed start slot.
 * Returns HASH_EXIT_CODE_NORMAL with *valueOutput set on a hit,
 * HASH_EXIT_CODE_KEY_DNE if the key is absent, or an error code.
 * NOTE(review): the probe uses __sync_val_compare_and_swap, which WRITES
 * `key` into the first empty bucket it meets — a query that mutates the
 * table (it reserves the slot, value left unset). This looks like the
 * insert probe reused by the generator; confirm the reservation behavior
 * is intended. The literal -1 is presumably HASH_BUCKET_STATUS_EMPTY —
 * verify. */
int intintLCGLinearOpenCompactOpenMPHash_InnerQuerySingle(char *tableData,
							  int key,
							  int *valueOutput) {
	intintLCGLinearOpenCompactOpenMPHash_Bucket *buckets =
	    (intintLCGLinearOpenCompactOpenMPHash_Bucket *) &
	    tableData[sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)];
	int index;
	int exitCode;
	intintLCGLinearOpenCompactOpenMPHash_TableData *mytableData =
	    (intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData;
	intintHash_CompressLCGData compressFuncData =
	    mytableData->compressFuncData;
	unsigned int c = intintHash_CompressLCG(compressFuncData, key);
	unsigned long int iteration = 0;
	for (;;) {
		/* Linear probe: stride 1 from start slot c, modulo table size. */
		index =
		    ((1 * iteration + c) %
		     ((intintLCGLinearOpenCompactOpenMPHash_TableData *)
		      tableData)->numBuckets);
		int old_key =
		    __sync_val_compare_and_swap(&buckets[index].key, -1, key);
		if (old_key == HASH_BUCKET_STATUS_EMPTY) {
			exitCode = HASH_SEARCH_CODE_EMPTY;
			break;
		} else if (old_key == key) {
			exitCode = HASH_SEARCH_CODE_MATCH;
			break;
		} else if ((index == c && iteration > 0)) {
			/* Wrapped all the way around: table is full of other keys. */
			exitCode = HASH_EXIT_CODE_CYCLE;
			break;
		}
		iteration++;
	}
	switch (exitCode) {
	case HASH_SEARCH_CODE_MATCH:
		*valueOutput = buckets[index].value;
		return HASH_EXIT_CODE_NORMAL;
	case HASH_SEARCH_CODE_MISMATCH:
	case HASH_SEARCH_CODE_EMPTY:
		return HASH_EXIT_CODE_KEY_DNE;
	default:
		return exitCode;
	}
}

/* Batch lookup: serial loop over keys, same probe as InnerQuerySingle
 * (including the CAS side effect noted there). Returns
 * HASH_EXIT_CODE_KEY_DNE if any key was absent, else NORMAL; hard errors
 * abort the whole batch. */
int intintLCGLinearOpenCompactOpenMPHash_InnerQuery(char *tableData,
						    unsigned int numKeys,
						    int *keys,
						    int *valuesOutput) {
	intintLCGLinearOpenCompactOpenMPHash_Bucket *buckets =
	    (intintLCGLinearOpenCompactOpenMPHash_Bucket *) &
	    tableData[sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)];
	int key;
	int *valueOutput;
	int index;
	int exitCode;
	uint i;
	int resultExitCode = HASH_EXIT_CODE_NORMAL;
	for (i = 0; i < numKeys; i++) {
		key = keys[i];
		valueOutput = &valuesOutput[i];
		intintLCGLinearOpenCompactOpenMPHash_TableData *mytableData =
		    (intintLCGLinearOpenCompactOpenMPHash_TableData *)
		    tableData;
		intintHash_CompressLCGData compressFuncData =
		    mytableData->compressFuncData;
		unsigned int c = intintHash_CompressLCG(compressFuncData, key);
		unsigned long int iteration = 0;
		for (;;) {
			index =
			    ((1 * iteration + c) %
			     ((intintLCGLinearOpenCompactOpenMPHash_TableData *)
			      tableData)->numBuckets);
			int old_key =
			    __sync_val_compare_and_swap(&buckets[index].key,
							-1, key);
			if (old_key == HASH_BUCKET_STATUS_EMPTY) {
				exitCode = HASH_SEARCH_CODE_EMPTY;
				break;
			} else if (old_key == key) {
				exitCode = HASH_SEARCH_CODE_MATCH;
				break;
			} else if ((index == c && iteration > 0)) {
				exitCode = HASH_EXIT_CODE_CYCLE;
				break;
			}
			iteration++;
		}
		switch (exitCode) {
		case HASH_SEARCH_CODE_MATCH:
			*valueOutput = buckets[index].value;
			break;
		case HASH_SEARCH_CODE_MISMATCH:
		case HASH_SEARCH_CODE_EMPTY:
			resultExitCode = HASH_EXIT_CODE_KEY_DNE;
			break;
		default:
			return exitCode;
		}
	}
	return resultExitCode;
}

/* Insert one key/value pair, overwriting the value on an existing key.
 * Claims an empty bucket atomically via compare-and-swap. Returns
 * HASH_EXIT_CODE_NORMAL on a fresh insert, HASH_EXIT_CODE_OVERWRITE when
 * the key already existed. (Body continues past this block.) */
int intintLCGLinearOpenCompactOpenMPHash_InnerInsertSingle(char *tableData,
							   int key, int value) {
	intintLCGLinearOpenCompactOpenMPHash_Bucket *buckets =
	    (intintLCGLinearOpenCompactOpenMPHash_Bucket *) &
	    tableData[sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)];
	int index;
	int exitCode;
	intintLCGLinearOpenCompactOpenMPHash_TableData *mytableData =
	    (intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData;
	intintHash_CompressLCGData compressFuncData =
	    mytableData->compressFuncData;
	unsigned int c = intintHash_CompressLCG(compressFuncData, key);
	unsigned long int iteration = 0;
	for
(;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData)->numBuckets); int old_key = __sync_val_compare_and_swap(&buckets[index].key, -1, key); if (old_key == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (old_key == key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: buckets[index].value = value; return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintLCGLinearOpenCompactOpenMPHash_InnerInsert(char *tableData, unsigned int numEntries, int *keys, int *values) { intintLCGLinearOpenCompactOpenMPHash_Bucket *buckets = (intintLCGLinearOpenCompactOpenMPHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i; #pragma omp parallel for for (i = 0; i < numEntries; i++) { key = keys[i]; intintLCGLinearOpenCompactOpenMPHash_TableData *mytableData = (intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData)->numBuckets); int old_key = __sync_val_compare_and_swap(&buckets[index].key, -1, key); if (old_key == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (old_key == key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: 
resultExitCode = HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintLCGLinearOpenCompactOpenMPHash_InnerInsertSingleNoOverwrite(char *tableData, int key, int value) { intintLCGLinearOpenCompactOpenMPHash_Bucket *buckets = (intintLCGLinearOpenCompactOpenMPHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)]; int index; int exitCode; intintLCGLinearOpenCompactOpenMPHash_TableData *mytableData = (intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData)->numBuckets); int old_key = __sync_val_compare_and_swap(&buckets[index].key, -1, key); if (old_key == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (old_key == key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: return HASH_EXIT_CODE_OVERWRITE; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = value; return HASH_EXIT_CODE_NORMAL; default: return exitCode; } } int intintLCGLinearOpenCompactOpenMPHash_InnerInsertNoOverwrite(char *tableData, unsigned int numEntries, int *keys, int *values) { intintLCGLinearOpenCompactOpenMPHash_Bucket *buckets = (intintLCGLinearOpenCompactOpenMPHash_Bucket *) & tableData[sizeof(intintLCGLinearOpenCompactOpenMPHash_TableData)]; int resultExitCode = HASH_EXIT_CODE_NORMAL; int key; int index; int exitCode; uint i; #pragma omp parallel for for (i = 0; i < numEntries; i++) { key = keys[i]; intintLCGLinearOpenCompactOpenMPHash_TableData *mytableData = 
(intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData; intintHash_CompressLCGData compressFuncData = mytableData->compressFuncData; unsigned int c = intintHash_CompressLCG(compressFuncData, key); unsigned long int iteration = 0; for (;;) { index = ((1 * iteration + c) % ((intintLCGLinearOpenCompactOpenMPHash_TableData *) tableData)->numBuckets); int old_key = __sync_val_compare_and_swap(&buckets[index].key, -1, key); if (old_key == HASH_BUCKET_STATUS_EMPTY) { exitCode = HASH_SEARCH_CODE_EMPTY; break; } else if (old_key == key) { exitCode = HASH_SEARCH_CODE_MATCH; break; } else if ((index == c && iteration > 0)) { exitCode = HASH_EXIT_CODE_CYCLE; break; } iteration++; } switch (exitCode) { case HASH_SEARCH_CODE_MATCH: case HASH_SEARCH_CODE_MISMATCH: resultExitCode = HASH_EXIT_CODE_OVERWRITE; break; case HASH_SEARCH_CODE_EMPTY: buckets[index].value = values[i]; break; default: resultExitCode = exitCode; } } return resultExitCode; } int intintLCGLinearOpenCompactOpenMPHash_QuerySingle(intintHash_Table * table, int key, int *valueOutput) { return intintLCGLinearOpenCompactOpenMPHash_InnerQuerySingle(table-> tableData, key, valueOutput); } int intintLCGLinearOpenCompactOpenMPHash_Query(intintHash_Table * table, size_t numKeys, int *keys, int *valuesOutput) { return intintLCGLinearOpenCompactOpenMPHash_InnerQuery(table->tableData, numKeys, keys, valuesOutput); } int intintLCGLinearOpenCompactOpenMPHash_InsertSingle(intintHash_Table * table, int key, int value) { return intintLCGLinearOpenCompactOpenMPHash_InnerInsertSingle(table-> tableData, key, value); } int intintLCGLinearOpenCompactOpenMPHash_Insert(intintHash_Table * table, size_t numEntries, int *keys, int *values) { return intintLCGLinearOpenCompactOpenMPHash_InnerInsert(table-> tableData, numEntries, keys, values); } int intintLCGLinearOpenCompactOpenMPHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) { return intintLCGLinearOpenCompactOpenMPHash_InnerInsertSingleNoOverwrite 
	    (table->tableData, key, value);
}

/* Wrapper: forward batch no-overwrite insert to the Inner implementation. */
int intintLCGLinearOpenCompactOpenMPHash_InsertNoOverwrite(intintHash_Table *
							   table,
							   size_t numEntries,
							   int *keys,
							   int *values) {
	return intintLCGLinearOpenCompactOpenMPHash_InnerInsertNoOverwrite(table->
									   tableData,
									   numEntries,
									   keys,
									   values);
}

/* Header stored at the front of tableData for the serial quadratic-probing
 * open-compact variant; the bucket array follows it contiguously. */
typedef struct intintLCGQuadraticOpenCompactHash_TableData {
	int hashID;
	unsigned int numBuckets;
	intintHash_CompressLCGData compressFuncData;
} intintLCGQuadraticOpenCompactHash_TableData;

/* One open-addressing slot; key doubles as the occupancy marker. */
typedef struct intintLCGQuadraticOpenCompactHash_Bucket {
	int key;
	int value;
} intintLCGQuadraticOpenCompactHash_Bucket;

/* Allocate and wire up a serial quadratic open-compact table.
 * keyRange is unused; numBuckets starts at numEntries/loadFactor and is
 * then snapped to largestProthPrimeUnder() (quadratic probing needs a
 * suitable modulus to visit all slots).
 * NOTE(review): malloc results are unchecked, as elsewhere in this file. */
intintHash_Table
    *intintLCGQuadraticOpenCompactHash_CreateTable(intintHash_Factory *
						   factory, int hashIndex,
						   size_t keyRange,
						   size_t numEntries,
						   float loadFactor) {
	intintHash_Table *table =
	    (intintHash_Table *) malloc(sizeof(intintHash_Table));
	table->destroyFunc = &intintLCGQuadraticOpenCompactHash_DestroyTable;
	table->setupFunc = &intintLCGQuadraticOpenCompactHash_SetupTable;
	table->emptyFunc = &intintLCGQuadraticOpenCompactHash_EmptyTable;
	table->queryFunc = &intintLCGQuadraticOpenCompactHash_Query;
	table->querySingleFunc = &intintLCGQuadraticOpenCompactHash_QuerySingle;
	table->insertFunc = &intintLCGQuadraticOpenCompactHash_Insert;
	table->insertSingleFunc =
	    &intintLCGQuadraticOpenCompactHash_InsertSingle;
	table->insertNoOverwriteFunc =
	    &intintLCGQuadraticOpenCompactHash_InsertNoOverwrite;
	table->insertSingleNoOverwriteFunc =
	    &intintLCGQuadraticOpenCompactHash_InsertSingleNoOverwrite;
	table->tableData =
	    (char *)malloc(sizeof(intintLCGQuadraticOpenCompactHash_TableData));
	((intintLCGQuadraticOpenCompactHash_TableData *) table->tableData)->
	    hashID = LCG_QUADRATIC_OPEN_COMPACT_HASH_ID;
	((intintLCGQuadraticOpenCompactHash_TableData *) table->tableData)->
	    numBuckets = (unsigned int)((double)numEntries / loadFactor);
	/* LCG compress-function parameters from library-wide constants. */
	((intintLCGQuadraticOpenCompactHash_TableData *) table->tableData)->
	    compressFuncData.a = HASH_LCG_A;
	((intintLCGQuadraticOpenCompactHash_TableData *) table->tableData)->
	    compressFuncData.c = HASH_LCG_C;
	((intintLCGQuadraticOpenCompactHash_TableData *) table->tableData)->
	    compressFuncData.m = HASH_LCG_M;
	((intintLCGQuadraticOpenCompactHash_TableData *) table->tableData)->
	    compressFuncData.n =
	    ((intintLCGQuadraticOpenCompactHash_TableData *) table->tableData)->
	    numBuckets;
	((intintLCGQuadraticOpenCompactHash_TableData *) table->tableData)->
	    numBuckets =
	    largestProthPrimeUnder(((intintLCGQuadraticOpenCompactHash_TableData
				     *) table->tableData)->numBuckets);
	/* Re-allocate so header + bucket array are one contiguous block. */
	char *tempHashData =
	    (char *)malloc(sizeof(intintLCGQuadraticOpenCompactHash_TableData) +
			   ((intintLCGQuadraticOpenCompactHash_TableData *)
			    table->tableData)->numBuckets *
			   sizeof(intintLCGQuadraticOpenCompactHash_Bucket));
	memcpy(tempHashData, table->tableData,
	       sizeof(intintLCGQuadraticOpenCompactHash_TableData));
	free(table->tableData);
	table->tableData = tempHashData;
	return table;
}

/* Register this variant's create/destroy hooks in the factory slot. */
int intintLCGQuadraticOpenCompactHash_CreateFactory(intintHash_Factory *
						    factory, int hashIndex) {
	factory->createFunc[hashIndex] =
	    &intintLCGQuadraticOpenCompactHash_CreateTable;
	factory->destroyFunc[hashIndex] =
	    &intintLCGQuadraticOpenCompactHash_DestroyFactory;;
	return HASH_EXIT_CODE_NORMAL;
}

/* No per-factory state for this variant. */
int intintLCGQuadraticOpenCompactHash_DestroyFactory(intintHash_Factory *
						     factory, int hashIndex) {;
	return HASH_EXIT_CODE_NORMAL;
}

/* Free the table payload and the table struct itself. */
int intintLCGQuadraticOpenCompactHash_DestroyTable(intintHash_Table * table) {
	int exitCode = 0;
	free(table->tableData);
	free(table);
	return exitCode;
}

/* One-time setup: mark every bucket empty (serial loop; skipped for
   perfect-hash table types). Continues past this block. */
int intintLCGQuadraticOpenCompactHash_SetupTable(intintHash_Table * table) {
	int exitCode = 0;
	intintLCGQuadraticOpenCompactHash_Bucket *buckets =
	    (intintLCGQuadraticOpenCompactHash_Bucket *) & table->
	    tableData[sizeof(intintLCGQuadraticOpenCompactHash_TableData)];
	if (intintHash_GetTableType(table) & ~HASH_SENTINEL_PERFECT_HASHES) {
		for (int index = 0;
		     index <
		     ((intintLCGQuadraticOpenCompactHash_TableData *) table->
		      tableData)->numBuckets; index++) {
			buckets[index].key = HASH_BUCKET_STATUS_EMPTY;
		}}
	exitCode =
	    HASH_EXIT_CODE_NORMAL;
	return exitCode;
}

/* Reset every bucket to empty (serial, unconditional). */
int intintLCGQuadraticOpenCompactHash_EmptyTable(intintHash_Table * table) {
	int exitCode = 0;
	intintLCGQuadraticOpenCompactHash_Bucket *buckets =
	    (intintLCGQuadraticOpenCompactHash_Bucket *) & table->
	    tableData[sizeof(intintLCGQuadraticOpenCompactHash_TableData)];
	for (int index = 0;
	     index <
	     ((intintLCGQuadraticOpenCompactHash_TableData *) table->
	      tableData)->numBuckets; index++) {
		buckets[index].key = HASH_BUCKET_STATUS_EMPTY;
	}
	exitCode = HASH_EXIT_CODE_NORMAL;
	return exitCode;
}

/* Look up one key via quadratic probing (offset = iteration^2) from the
 * LCG-compressed start slot. Pure read — unlike the OpenMP linear
 * variant, this probe does not modify the table. Returns NORMAL with
 * *valueOutput set on a hit, HASH_EXIT_CODE_KEY_DNE when absent. */
int intintLCGQuadraticOpenCompactHash_InnerQuerySingle(char *tableData,
						       int key,
						       int *valueOutput) {
	intintLCGQuadraticOpenCompactHash_Bucket *buckets =
	    (intintLCGQuadraticOpenCompactHash_Bucket *) &
	    tableData[sizeof(intintLCGQuadraticOpenCompactHash_TableData)];
	int index;
	int exitCode;
	intintLCGQuadraticOpenCompactHash_TableData *mytableData =
	    (intintLCGQuadraticOpenCompactHash_TableData *) tableData;
	intintHash_CompressLCGData compressFuncData =
	    mytableData->compressFuncData;
	unsigned int c = intintHash_CompressLCG(compressFuncData, key);
	unsigned long int iteration = 0;
	for (;;) {
		/* Quadratic probe: 1*i^2 + 0*i + c, modulo table size. */
		index =
		    ((1 * iteration * iteration + 0 * iteration + c) %
		     ((intintLCGQuadraticOpenCompactHash_TableData *)
		      tableData)->numBuckets);
		if ((buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {
			exitCode = HASH_SEARCH_CODE_EMPTY;
			break;
		} else if (key == buckets[index].key) {
			exitCode = HASH_SEARCH_CODE_MATCH;
			break;
		} else if ((iteration >
			    ((intintLCGQuadraticOpenCompactHash_TableData *)
			     tableData)->numBuckets)) {
			/* Probed more slots than the table holds: give up. */
			exitCode = HASH_EXIT_CODE_CYCLE;
			break;
		}
		iteration++;
	}
	switch (exitCode) {
	case HASH_SEARCH_CODE_MATCH:
		*valueOutput = buckets[index].value;
		return HASH_EXIT_CODE_NORMAL;
	case HASH_SEARCH_CODE_MISMATCH:
	case HASH_SEARCH_CODE_EMPTY:
		return HASH_EXIT_CODE_KEY_DNE;
	default:
		return exitCode;
	}
}

/* Batch lookup: serial loop over keys, same quadratic probe as
 * InnerQuerySingle. Returns HASH_EXIT_CODE_KEY_DNE if any key was
 * absent, else NORMAL; hard errors abort the batch. */
int intintLCGQuadraticOpenCompactHash_InnerQuery(char *tableData,
						 unsigned int numKeys,
						 int *keys,
						 int *valuesOutput) {
	intintLCGQuadraticOpenCompactHash_Bucket *buckets =
	    (intintLCGQuadraticOpenCompactHash_Bucket *) &
	    tableData[sizeof(intintLCGQuadraticOpenCompactHash_TableData)];
	int key;
	int *valueOutput;
	int index;
	int exitCode;
	uint i;
	int resultExitCode = HASH_EXIT_CODE_NORMAL;
	for (i = 0; i < numKeys; i++) {
		key = keys[i];
		valueOutput = &valuesOutput[i];
		intintLCGQuadraticOpenCompactHash_TableData *mytableData =
		    (intintLCGQuadraticOpenCompactHash_TableData *) tableData;
		intintHash_CompressLCGData compressFuncData =
		    mytableData->compressFuncData;
		unsigned int c = intintHash_CompressLCG(compressFuncData, key);
		unsigned long int iteration = 0;
		for (;;) {
			index =
			    ((1 * iteration * iteration + 0 * iteration + c) %
			     ((intintLCGQuadraticOpenCompactHash_TableData *)
			      tableData)->numBuckets);
			if ((buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {
				exitCode = HASH_SEARCH_CODE_EMPTY;
				break;
			} else if (key == buckets[index].key) {
				exitCode = HASH_SEARCH_CODE_MATCH;
				break;
			} else if ((iteration >
				    ((intintLCGQuadraticOpenCompactHash_TableData
				      *) tableData)->numBuckets)) {
				exitCode = HASH_EXIT_CODE_CYCLE;
				break;
			}
			iteration++;
		}
		switch (exitCode) {
		case HASH_SEARCH_CODE_MATCH:
			*valueOutput = buckets[index].value;
			break;
		case HASH_SEARCH_CODE_MISMATCH:
		case HASH_SEARCH_CODE_EMPTY:
			resultExitCode = HASH_EXIT_CODE_KEY_DNE;
			break;
		default:
			return exitCode;
		}
	}
	return resultExitCode;
}

/* Insert one key/value pair with overwrite semantics (serial variant);
 * body continues past this block. */
int intintLCGQuadraticOpenCompactHash_InnerInsertSingle(char *tableData,
							int key, int value) {
	intintLCGQuadraticOpenCompactHash_Bucket *buckets =
	    (intintLCGQuadraticOpenCompactHash_Bucket *) &
	    tableData[sizeof(intintLCGQuadraticOpenCompactHash_TableData)];
	int index;
	int exitCode;
	intintLCGQuadraticOpenCompactHash_TableData *mytableData =
	    (intintLCGQuadraticOpenCompactHash_TableData *) tableData;
	intintHash_CompressLCGData compressFuncData =
	    mytableData->compressFuncData;
	unsigned int c = intintHash_CompressLCG(compressFuncData, key);
	unsigned long int iteration = 0;
	for (;;) {
		index =
		    ((1 * iteration * iteration + 0 * iteration + c) %
		     ((intintLCGQuadraticOpenCompactHash_TableData *)
		      tableData)->numBuckets);
		/* Serial claim-if-empty: the conditional's comma expression
		   writes `key` into an empty bucket and yields EMPTY, else it
		   yields the occupant key. Not atomic — this variant is
		   single-threaded. */
		if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ?
		     (buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) :
		     buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {
			exitCode = HASH_SEARCH_CODE_EMPTY;
			break;
		} else if (key == buckets[index].key) {
			exitCode = HASH_SEARCH_CODE_MATCH;
			break;
		} else if ((iteration >
			    ((intintLCGQuadraticOpenCompactHash_TableData *)
			     tableData)->numBuckets)) {
			exitCode = HASH_EXIT_CODE_CYCLE;
			break;
		}
		iteration++;
	}
	switch (exitCode) {
	case HASH_SEARCH_CODE_MATCH:
	case HASH_SEARCH_CODE_MISMATCH:
		/* Existing key: overwrite its value. */
		buckets[index].value = value;
		return HASH_EXIT_CODE_OVERWRITE;
	case HASH_SEARCH_CODE_EMPTY:
		/* Bucket was claimed above; store the value. */
		buckets[index].value = value;
		return HASH_EXIT_CODE_NORMAL;
	default:
		return exitCode;
	}
}

/* Serial batch insert with overwrite semantics. Returns
 * HASH_EXIT_CODE_OVERWRITE if any key already existed, else NORMAL. */
int intintLCGQuadraticOpenCompactHash_InnerInsert(char *tableData,
						  unsigned int numEntries,
						  int *keys, int *values) {
	intintLCGQuadraticOpenCompactHash_Bucket *buckets =
	    (intintLCGQuadraticOpenCompactHash_Bucket *) &
	    tableData[sizeof(intintLCGQuadraticOpenCompactHash_TableData)];
	int resultExitCode = HASH_EXIT_CODE_NORMAL;
	int key;
	int index;
	int exitCode;
	uint i;;
	for (i = 0; i < numEntries; i++) {
		key = keys[i];
		intintLCGQuadraticOpenCompactHash_TableData *mytableData =
		    (intintLCGQuadraticOpenCompactHash_TableData *) tableData;
		intintHash_CompressLCGData compressFuncData =
		    mytableData->compressFuncData;
		unsigned int c = intintHash_CompressLCG(compressFuncData, key);
		unsigned long int iteration = 0;
		for (;;) {
			index =
			    ((1 * iteration * iteration + 0 * iteration + c) %
			     ((intintLCGQuadraticOpenCompactHash_TableData *)
			      tableData)->numBuckets);
			if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ?
			     (buckets[index].key =
			      key, HASH_BUCKET_STATUS_EMPTY) :
			     buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {
				exitCode = HASH_SEARCH_CODE_EMPTY;
				break;
			} else if (key == buckets[index].key) {
				exitCode = HASH_SEARCH_CODE_MATCH;
				break;
			} else if ((iteration >
				    ((intintLCGQuadraticOpenCompactHash_TableData
				      *) tableData)->numBuckets)) {
				exitCode = HASH_EXIT_CODE_CYCLE;
				break;
			}
			iteration++;
		}
		switch (exitCode) {
		case HASH_SEARCH_CODE_MATCH:
		case HASH_SEARCH_CODE_MISMATCH:
			/* Falls through: overwrite writes the value below. */
			resultExitCode = HASH_EXIT_CODE_OVERWRITE;
		case HASH_SEARCH_CODE_EMPTY:
			buckets[index].value = values[i];
			break;
		default:
			resultExitCode = exitCode;
		}
	}
	return resultExitCode;
}

/* Insert one pair only if absent (serial); body continues past this block. */
int intintLCGQuadraticOpenCompactHash_InnerInsertSingleNoOverwrite(char
								   *tableData,
								   int key,
								   int value) {
	intintLCGQuadraticOpenCompactHash_Bucket *buckets =
	    (intintLCGQuadraticOpenCompactHash_Bucket *) &
	    tableData[sizeof(intintLCGQuadraticOpenCompactHash_TableData)];
	int index;
	int exitCode;
	intintLCGQuadraticOpenCompactHash_TableData *mytableData =
	    (intintLCGQuadraticOpenCompactHash_TableData *) tableData;
	intintHash_CompressLCGData compressFuncData =
	    mytableData->compressFuncData;
	unsigned int c = intintHash_CompressLCG(compressFuncData, key);
	unsigned long int iteration = 0;
	for (;;) {
		index =
		    ((1 * iteration * iteration + 0 * iteration + c) %
		     ((intintLCGQuadraticOpenCompactHash_TableData *)
		      tableData)->numBuckets);
		if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ?
		     (buckets[index].key = key, HASH_BUCKET_STATUS_EMPTY) :
		     buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {
			exitCode = HASH_SEARCH_CODE_EMPTY;
			break;
		} else if (key == buckets[index].key) {
			exitCode = HASH_SEARCH_CODE_MATCH;
			break;
		} else if ((iteration >
			    ((intintLCGQuadraticOpenCompactHash_TableData *)
			     tableData)->numBuckets)) {
			exitCode = HASH_EXIT_CODE_CYCLE;
			break;
		}
		iteration++;
	}
	switch (exitCode) {
	case HASH_SEARCH_CODE_MATCH:
	case HASH_SEARCH_CODE_MISMATCH:
		/* Key already present: no-overwrite contract — leave value. */
		return HASH_EXIT_CODE_OVERWRITE;
	case HASH_SEARCH_CODE_EMPTY:
		buckets[index].value = value;
		return HASH_EXIT_CODE_NORMAL;
	default:
		return exitCode;
	}
}

/* Serial batch no-overwrite insert. Returns HASH_EXIT_CODE_OVERWRITE if
 * any key already existed (its value untouched), else NORMAL. */
int intintLCGQuadraticOpenCompactHash_InnerInsertNoOverwrite(char *tableData,
							     unsigned int
							     numEntries,
							     int *keys,
							     int *values) {
	intintLCGQuadraticOpenCompactHash_Bucket *buckets =
	    (intintLCGQuadraticOpenCompactHash_Bucket *) &
	    tableData[sizeof(intintLCGQuadraticOpenCompactHash_TableData)];
	int resultExitCode = HASH_EXIT_CODE_NORMAL;
	int key;
	int index;
	int exitCode;
	uint i;;
	for (i = 0; i < numEntries; i++) {
		key = keys[i];
		intintLCGQuadraticOpenCompactHash_TableData *mytableData =
		    (intintLCGQuadraticOpenCompactHash_TableData *) tableData;
		intintHash_CompressLCGData compressFuncData =
		    mytableData->compressFuncData;
		unsigned int c = intintHash_CompressLCG(compressFuncData, key);
		unsigned long int iteration = 0;
		for (;;) {
			/* Quadratic probe with serial claim-if-empty (comma
			   expression writes the key into an empty bucket). */
			index =
			    ((1 * iteration * iteration + 0 * iteration + c) %
			     ((intintLCGQuadraticOpenCompactHash_TableData *)
			      tableData)->numBuckets);
			if (((buckets[index].key == HASH_BUCKET_STATUS_EMPTY) ?
			     (buckets[index].key =
			      key, HASH_BUCKET_STATUS_EMPTY) :
			     buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {
				exitCode = HASH_SEARCH_CODE_EMPTY;
				break;
			} else if (key == buckets[index].key) {
				exitCode = HASH_SEARCH_CODE_MATCH;
				break;
			} else if ((iteration >
				    ((intintLCGQuadraticOpenCompactHash_TableData
				      *) tableData)->numBuckets)) {
				exitCode = HASH_EXIT_CODE_CYCLE;
				break;
			}
			iteration++;
		}
		switch (exitCode) {
		case HASH_SEARCH_CODE_MATCH:
		case HASH_SEARCH_CODE_MISMATCH:
			resultExitCode = HASH_EXIT_CODE_OVERWRITE;
			break;
		case HASH_SEARCH_CODE_EMPTY:
			buckets[index].value = values[i];
			break;
		default:
			resultExitCode = exitCode;
		}
	}
	return resultExitCode;
}

/* Thin dispatch wrappers: unwrap the table handle and forward to the
 * Inner* implementations above. */
int intintLCGQuadraticOpenCompactHash_QuerySingle(intintHash_Table * table,
						  int key, int *valueOutput) {
	return intintLCGQuadraticOpenCompactHash_InnerQuerySingle(table->
								  tableData,
								  key,
								  valueOutput);
}
int intintLCGQuadraticOpenCompactHash_Query(intintHash_Table * table,
					    size_t numKeys, int *keys,
					    int *valuesOutput) {
	return intintLCGQuadraticOpenCompactHash_InnerQuery(table->tableData,
							    numKeys, keys,
							    valuesOutput);
}
int intintLCGQuadraticOpenCompactHash_InsertSingle(intintHash_Table * table,
						   int key, int value) {
	return intintLCGQuadraticOpenCompactHash_InnerInsertSingle(table->
								   tableData,
								   key, value);
}
int intintLCGQuadraticOpenCompactHash_Insert(intintHash_Table * table,
					     size_t numEntries, int *keys,
					     int *values) {
	return intintLCGQuadraticOpenCompactHash_InnerInsert(table->tableData,
							     numEntries, keys,
							     values);
}
int intintLCGQuadraticOpenCompactHash_InsertSingleNoOverwrite(intintHash_Table
							      * table, int key,
							      int value) {
	return intintLCGQuadraticOpenCompactHash_InnerInsertSingleNoOverwrite
	    (table->tableData, key, value);
}
int intintLCGQuadraticOpenCompactHash_InsertNoOverwrite(intintHash_Table *
							table,
							size_t numEntries,
							int *keys,
							int *values) {
	return intintLCGQuadraticOpenCompactHash_InnerInsertNoOverwrite(table->
									tableData,
									numEntries,
									keys,
									values);
}

/* Header for the OpenCL quadratic open-compact variant (mirrors the
 * serial TableData layout; definition continues past this block). */
typedef struct intintLCGQuadraticOpenCompactCLHash_TableData {
    int hashID;                 /* implementation tag (see *_HASH_ID constants) */
    unsigned int numBuckets;    /* bucket-array length */
    intintHash_CompressLCGData compressFuncData;  /* LCG hash-compression params */
} intintLCGQuadraticOpenCompactCLHash_TableData;

/* One open-addressing slot; key == HASH_BUCKET_STATUS_EMPTY (-1) means free. */
typedef struct intintLCGQuadraticOpenCompactCLHash_Bucket {
    int key;
    int value;
} intintLCGQuadraticOpenCompactCLHash_Bucket;

/*
 * Create an OpenCL-backed LCG quadratic open-compact hash table.
 * Sizes the bucket array from numEntries/loadFactor, borrows (and retains)
 * the factory's OpenCL context/queue/program/kernels, then allocates the
 * host header+buckets and a matching device buffer.  keyRange is unused by
 * this (compact) variant.
 */
intintHash_Table *intintLCGQuadraticOpenCompactCLHash_CreateTable(intintHash_Factory *
                                                                  factory,
                                                                  int hashIndex,
                                                                  size_t keyRange,
                                                                  size_t numEntries,
                                                                  float loadFactor) {
    intintHash_Table *table =
        (intintHash_Table *) malloc(sizeof(intintHash_Table));
    /* Wire up the virtual dispatch table for this implementation. */
    table->destroyFunc = &intintLCGQuadraticOpenCompactCLHash_DestroyTable;
    table->setupFunc = &intintLCGQuadraticOpenCompactCLHash_SetupTable;
    table->emptyFunc = &intintLCGQuadraticOpenCompactCLHash_EmptyTable;
    table->queryFunc = &intintLCGQuadraticOpenCompactCLHash_Query;
    table->querySingleFunc = &intintLCGQuadraticOpenCompactCLHash_QuerySingle;
    table->insertFunc = &intintLCGQuadraticOpenCompactCLHash_Insert;
    table->insertSingleFunc = &intintLCGQuadraticOpenCompactCLHash_InsertSingle;
    table->insertNoOverwriteFunc =
        &intintLCGQuadraticOpenCompactCLHash_InsertNoOverwrite;
    table->insertSingleNoOverwriteFunc =
        &intintLCGQuadraticOpenCompactCLHash_InsertSingleNoOverwrite;
    table->tableData =
        (char *) malloc(sizeof(intintLCGQuadraticOpenCompactCLHash_TableData));
    ((intintLCGQuadraticOpenCompactCLHash_TableData *) table->tableData)->
        hashID = LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID;
    /* Borrow the factory's OpenCL objects, then retain each one below so the
       table owns its own references. */
    table->context = factory->context;
    table->queue = factory->queue;
    table->program = factory->program;
    table->localWorkSize = factory->localWorkSize;
    table->utilProgram = factory->utilProgram[hashIndex];
    table->emptyKernel = factory->emptyKernel[hashIndex];
    table->emptyKernelLocalWorkSize =
        factory->emptyKernelLocalWorkSize[hashIndex];
    table->querySingleKernel = factory->querySingleKernel[hashIndex];
    table->insertSingleKernel = factory->insertSingleKernel[hashIndex];
    table->insertSingleNoOverwriteKernel =
        factory->insertSingleNoOverwriteKernel[hashIndex];
    clRetainContext(table->context);
    clRetainCommandQueue(table->queue);
    clRetainProgram(table->program);
    clRetainProgram(table->utilProgram);
    clRetainKernel(table->emptyKernel);
    clRetainKernel(table->querySingleKernel);
    clRetainKernel(table->insertSingleKernel);
    clRetainKernel(table->insertSingleNoOverwriteKernel);;
    /* Bucket count = entries / load factor, then reduced to a Proth prime so
       the quadratic probe sequence visits every bucket. */
    ((intintLCGQuadraticOpenCompactCLHash_TableData *) table->tableData)->
        numBuckets = (unsigned int)((double)numEntries / loadFactor);
    ((intintLCGQuadraticOpenCompactCLHash_TableData *) table->tableData)->
        compressFuncData.a = HASH_LCG_A;
    ((intintLCGQuadraticOpenCompactCLHash_TableData *) table->tableData)->
        compressFuncData.c = HASH_LCG_C;
    ((intintLCGQuadraticOpenCompactCLHash_TableData *) table->tableData)->
        compressFuncData.m = HASH_LCG_M;
    ((intintLCGQuadraticOpenCompactCLHash_TableData *) table->tableData)->
        compressFuncData.n =
        ((intintLCGQuadraticOpenCompactCLHash_TableData *) table->
         tableData)->numBuckets;
    ((intintLCGQuadraticOpenCompactCLHash_TableData *) table->tableData)->
        numBuckets =
        largestProthPrimeUnder(((intintLCGQuadraticOpenCompactCLHash_TableData
                                 *) table->tableData)->numBuckets);
    /* Grow the host allocation so the bucket array follows the header. */
    char *tempHashData =
        (char *)malloc(sizeof(intintLCGQuadraticOpenCompactCLHash_TableData) +
                       ((intintLCGQuadraticOpenCompactCLHash_TableData *)
                        table->tableData)->numBuckets *
                       sizeof(intintLCGQuadraticOpenCompactCLHash_Bucket));
    memcpy(tempHashData, table->tableData,
           sizeof(intintLCGQuadraticOpenCompactCLHash_TableData));
    free(table->tableData);
    table->tableData = tempHashData;
    cl_int err;
    /* NOTE(review): the device buffer is sized with the non-CL (serial)
       TableData/Bucket structs; the visible definitions have identical
       members, but confirm the two stay in sync. */
    table->tableDataBuffer =
        clCreateBuffer(table->context, CL_MEM_READ_WRITE,
                       sizeof(intintLCGQuadraticOpenCompactHash_TableData) +
                       ((intintLCGQuadraticOpenCompactHash_TableData *)
                        table->tableData)->numBuckets *
                       sizeof(intintLCGQuadraticOpenCompactHash_Bucket), NULL,
                       &err);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_InitTable",
                                 "clCreateBuffer");
    /* Upload the header (blocking write; bucket area is initialized later by
       the Empty kernel). */
    err =
        clEnqueueWriteBuffer(table->queue, table->tableDataBuffer, CL_TRUE, 0,
                             sizeof
                             (intintLCGQuadraticOpenCompactHash_TableData),
                             table->tableData, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_InitTable",
                                 "clEnqueueWriteBuffer");
    return table;
}

/*
 * Register this implementation's create/destroy hooks in the factory and
 * compile its OpenCL kernels: the three range kernels come from the factory's
 * main program; the table-clearing Empty kernel is built here from an inline
 * source string.
 */
int intintLCGQuadraticOpenCompactCLHash_CreateFactory(intintHash_Factory *
                                                      factory, int hashIndex) {
    factory->createFunc[hashIndex] =
        &intintLCGQuadraticOpenCompactCLHash_CreateTable;
    factory->destroyFunc[hashIndex] =
        &intintLCGQuadraticOpenCompactCLHash_DestroyFactory;
    cl_int error;
    cl_device_id device;
    /* NOTE(review): only the first device of the context is queried/used. */
    error =
        clGetContextInfo(factory->context, CL_CONTEXT_DEVICES, sizeof(device),
                         &device, NULL);
    CLHash_Utilities_HandleError(error, "intintHash_CreateFactory",
                                 "clGetContextInfo");
    factory->querySingleKernel[hashIndex] =
        clCreateKernel(factory->program,
                       "intintLCGQuadraticOpenCompactCLHash_RangeQuerySingle",
                       &error);
    CLHash_Utilities_HandleError(error,
                                 "intintLCGQuadraticOpenCompactCLHash_CreateFactory",
                                 "clCreateKernel");
    factory->insertSingleKernel[hashIndex] =
        clCreateKernel(factory->program,
                       "intintLCGQuadraticOpenCompactCLHash_RangeInsertSingle",
                       &error);
    CLHash_Utilities_HandleError(error,
                                 "intintLCGQuadraticOpenCompactCLHash_CreateFactory",
                                 "clCreateKernel");
    factory->insertSingleNoOverwriteKernel[hashIndex] =
        clCreateKernel(factory->program,
                       "intintLCGQuadraticOpenCompactCLHash_RangeInsertSingleNoOverwrite",
                       &error);
    CLHash_Utilities_HandleError(error,
                                 "intintLCGQuadraticOpenCompactCLHash_CreateFactory",
                                 "clCreateKernel");
    /* Inline OpenCL source for the Empty kernel: sets every bucket key to -1
       (HASH_BUCKET_STATUS_EMPTY).  The string must match the host structs. */
    factory->utilProgram[hashIndex] =
        CLHash_Utilities_BuildProgramString(factory->context, device,
                                            "static inline unsigned int intintHash_CompressIdentity(char data, int hashCode){ return hashCode; } typedef struct intintHash_CompressLCGData{ long unsigned int a; long unsigned int c; unsigned int m; unsigned int n; }intintHash_CompressLCGData; static inline unsigned int intintHash_CompressLCG(intintHash_CompressLCGData compressLCGData, int hashCode){ return ((compressLCGData.a * hashCode + compressLCGData.c) % compressLCGData.m) % compressLCGData.n; } typedef struct intintLCGQuadraticOpenCompactCLHash_TableData{ int hashID; unsigned int numBuckets; intintHash_CompressLCGData compressFuncData; }intintLCGQuadraticOpenCompactCLHash_TableData; typedef struct intintLCGQuadraticOpenCompactCLHash_Bucket{ int key; int value; }intintLCGQuadraticOpenCompactCLHash_Bucket; __kernel void intintLCGQuadraticOpenCompactCLHash_Empty(__global char *tableData){ int index = get_global_id(0); if(index >= ((__global intintLCGQuadraticOpenCompactCLHash_TableData*)tableData)->numBuckets){ return; } __global intintLCGQuadraticOpenCompactCLHash_Bucket *buckets = (__global intintLCGQuadraticOpenCompactCLHash_Bucket*)&tableData[sizeof(intintLCGQuadraticOpenCompactCLHash_TableData)]; buckets[index].key = -1;/*HASH_BUCKET_STATUS_EMPTY*/ }");
    factory->emptyKernel[hashIndex] =
        clCreateKernel(factory->utilProgram[hashIndex],
                       "intintLCGQuadraticOpenCompactCLHash_Empty", &error);
    CLHash_Utilities_HandleError(error,
                                 "intintLCGQuadraticOpenCompactCLHash_CreateFactory",
                                 "clCreateKernel");
    /* Ask the device for the Empty kernel's preferred work-group size. */
    error =
        clGetKernelWorkGroupInfo(factory->emptyKernel[hashIndex], device,
                                 CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t),
                                 &factory->
                                 emptyKernelLocalWorkSize[hashIndex], NULL);
    CLHash_Utilities_HandleError(error,
                                 "intintLCGQuadraticOpenCompactCLHash_CreateFactory",
                                 "clGetKernelWorkGroupInfo");;;
    return HASH_EXIT_CODE_NORMAL;
}

/* Release the factory-owned kernels/program for this implementation. */
int intintLCGQuadraticOpenCompactCLHash_DestroyFactory(intintHash_Factory *
                                                       factory, int hashIndex) {;
    clReleaseKernel(factory->emptyKernel[hashIndex]);
    clReleaseProgram(factory->utilProgram[hashIndex]);
    clReleaseKernel(factory->querySingleKernel[hashIndex]);
    clReleaseKernel(factory->insertSingleKernel[hashIndex]);
    clReleaseKernel(factory->insertSingleNoOverwriteKernel[hashIndex]);;
    return HASH_EXIT_CODE_NORMAL;
}

/* Release every OpenCL reference retained by CreateTable, then free the host
   allocations (continues on the next chunk line). */
int intintLCGQuadraticOpenCompactCLHash_DestroyTable(intintHash_Table * table) {
    int exitCode = 0;
    clReleaseMemObject(table->tableDataBuffer);
    clReleaseContext(table->context);
    clReleaseCommandQueue(table->queue);
    clReleaseProgram(table->utilProgram);
    clReleaseKernel(table->emptyKernel);
    clReleaseProgram(table->program);
    clReleaseKernel(table->querySingleKernel);
    clReleaseKernel(table->insertSingleKernel);
    clReleaseKernel(table->insertSingleNoOverwriteKernel);
    free(table->tableData);
    free(table);
    return exitCode;
}

/*
 * Initialize the device-side table by launching the Empty kernel over all
 * buckets.  NOTE(review): SetupTable and EmptyTable below are byte-for-byte
 * identical (generated code); both simply clear every bucket key.
 */
int intintLCGQuadraticOpenCompactCLHash_SetupTable(intintHash_Table * table) {
    int exitCode = 0;
    cl_int err;
    err =
        clSetKernelArg(table->emptyKernel, 0, sizeof(cl_mem),
                       &table->tableDataBuffer);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_EmptyTable",
                                 "clSetKernelArg");
    /* Round the global size up to a multiple of the local work-group size. */
    const size_t groupWorkSize =
        roundUpToNearest(((intintLCGQuadraticOpenCompactHash_TableData *)
                          table->tableData)->numBuckets,
                         table->emptyKernelLocalWorkSize);
    err =
        clEnqueueNDRangeKernel(table->queue, table->emptyKernel, 1, 0,
                               &groupWorkSize,
                               (const size_t *)&table->
                               emptyKernelLocalWorkSize, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_EmptyTable",
                                 "clEnqueueNDRangeKernel");
    exitCode = HASH_EXIT_CODE_NORMAL;;
    return exitCode;
}

/* Reset the device-side table: mark every bucket empty (same as SetupTable). */
int intintLCGQuadraticOpenCompactCLHash_EmptyTable(intintHash_Table * table) {
    int exitCode = 0;
    cl_int err;
    err =
        clSetKernelArg(table->emptyKernel, 0, sizeof(cl_mem),
                       &table->tableDataBuffer);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_EmptyTable",
                                 "clSetKernelArg");
    const size_t groupWorkSize =
        roundUpToNearest(((intintLCGQuadraticOpenCompactHash_TableData *)
                          table->tableData)->numBuckets,
                         table->emptyKernelLocalWorkSize);
    err =
        clEnqueueNDRangeKernel(table->queue, table->emptyKernel, 1, 0,
                               &groupWorkSize,
                               (const size_t *)&table->
                               emptyKernelLocalWorkSize, 0, NULL, NULL);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_EmptyTable",
                                 "clEnqueueNDRangeKernel");
    exitCode = HASH_EXIT_CODE_NORMAL;;
    return exitCode;
}

/* Single-key query: delegates to the bulk Query with count 1. */
int intintLCGQuadraticOpenCompactCLHash_QuerySingle(intintHash_Table * table,
                                                    int key, int *valueOutput) {
    return intintLCGQuadraticOpenCompactCLHash_Query(table, 1, &key,
                                                     valueOutput);
}

/*
 * Bulk query: stage keys in a device buffer, run BufferQuery, and read the
 * values back (blocking).  Missing-key reporting, if any, happens inside the
 * kernel; this host path always returns HASH_EXIT_CODE_NORMAL.
 */
int intintLCGQuadraticOpenCompactCLHash_Query(intintHash_Table * table,
                                              size_t numKeys, int *keys,
                                              int *valuesOutput) {
    cl_int err;
    cl_mem keysBuffer =
        clCreateBuffer(table->context,
                       CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                       sizeof(int) * numKeys, keys, &err);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_Query",
                                 "clCreateBuffer");
    cl_mem valuesOutputBuffer =
        clCreateBuffer(table->context, CL_MEM_WRITE_ONLY,
                       sizeof(int) * numKeys, NULL, &err);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_Query",
                                 "clCreateBuffer");
    intintLCGQuadraticOpenCompactCLHash_BufferQuery(table, numKeys, keysBuffer,
                                                    valuesOutputBuffer);
    err =
        clEnqueueReadBuffer(table->queue, valuesOutputBuffer, CL_TRUE, 0,
                            sizeof(int) * numKeys, valuesOutput, 0, NULL,
                            NULL);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_Query",
                                 "clEnqueueReadBuffer");
    clReleaseMemObject(keysBuffer);
    clReleaseMemObject(valuesOutputBuffer);
    return HASH_EXIT_CODE_NORMAL;
}

/*
 * Query against caller-provided device buffers: bind kernel args, launch the
 * RangeQuerySingle kernel over numKeys work-items, and wait (clFinish).
 * NOTE(review): arg 1 is declared sizeof(unsigned int) but &numKeys points at
 * a size_t — relies on little-endian truncation when size_t is 64-bit;
 * confirm against the kernel signature.
 */
int intintLCGQuadraticOpenCompactCLHash_BufferQuery(intintHash_Table * table,
                                                    size_t numKeys,
                                                    cl_mem keysBuffer,
                                                    cl_mem valuesOutputBuffer) {
    cl_int err;
    err =
        clSetKernelArg(table->querySingleKernel, 0, sizeof(cl_mem),
                       &table->tableDataBuffer);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_BufferQuery",
                                 "clSetKernelArg");
    err =
        clSetKernelArg(table->querySingleKernel, 1, sizeof(unsigned int),
                       &numKeys);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_BufferQuery",
                                 "clSetKernelArg");
    err =
        clSetKernelArg(table->querySingleKernel, 2, sizeof(cl_mem),
                       &keysBuffer);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_BufferQuery",
                                 "clSetKernelArg");
    err =
        clSetKernelArg(table->querySingleKernel, 3, sizeof(cl_mem),
                       &valuesOutputBuffer);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_BufferQuery",
                                 "clSetKernelArg");
    const size_t groupWorkSize = roundUpToNearest(numKeys,
                                                  table->localWorkSize);
    err =
        clEnqueueNDRangeKernel(table->queue, table->querySingleKernel, 1, 0,
                               &groupWorkSize,
                               (const size_t *)&table->localWorkSize, 0, NULL,
                               NULL);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_BufferQuery",
                                 "clEnqueueNDRangeKernel");
    clFinish(table->queue);
    return HASH_EXIT_CODE_NORMAL;
}

/* Single insert (overwrite): delegates to the bulk Insert with count 1. */
int intintLCGQuadraticOpenCompactCLHash_InsertSingle(intintHash_Table * table,
                                                     int key, int value) {
    return intintLCGQuadraticOpenCompactCLHash_Insert(table, 1, &key, &value);
}

/* Bulk insert (overwrite): stage keys/values on the device and delegate. */
int intintLCGQuadraticOpenCompactCLHash_Insert(intintHash_Table * table,
                                               size_t numEntries, int *keys,
                                               int *values) {
    cl_int err;
    cl_mem keysBuffer =
        clCreateBuffer(table->context,
                       CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                       sizeof(int) * numEntries, keys, &err);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_Insert",
                                 "clCreateBuffer");
    cl_mem valuesBuffer =
        clCreateBuffer(table->context,
                       CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                       sizeof(int) * numEntries, values, &err);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_Insert",
                                 "clCreateBuffer");
    intintLCGQuadraticOpenCompactCLHash_BufferInsert(table, numEntries,
                                                     keysBuffer, valuesBuffer);
    clReleaseMemObject(keysBuffer);
    clReleaseMemObject(valuesBuffer);
    return HASH_EXIT_CODE_NORMAL;
}

/* Insert against caller-provided device buffers (overwrite semantics). */
int intintLCGQuadraticOpenCompactCLHash_BufferInsert(intintHash_Table * table,
                                                     size_t numEntries,
                                                     cl_mem keysBuffer,
                                                     cl_mem valuesBuffer) {
    cl_int err;
    err =
        clSetKernelArg(table->insertSingleKernel, 0, sizeof(cl_mem),
                       &table->tableDataBuffer);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_BufferInsert",
                                 "clSetKernelArg");
    /* Same size_t-vs-unsigned-int note as BufferQuery applies to arg 1. */
    err =
        clSetKernelArg(table->insertSingleKernel, 1, sizeof(unsigned int),
                       &numEntries);
    CLHash_Utilities_HandleError(err,
                                 "intintLCGQuadraticOpenCompactCLHash_BufferInsert",
                                 "clSetKernelArg");
    err =
        clSetKernelArg(table->insertSingleKernel, 2, sizeof(cl_mem),
                       &keysBuffer);
    CLHash_Utilities_HandleError(err,
"intintLCGQuadraticOpenCompactCLHash_BufferInsert", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleKernel, 3, sizeof(cl_mem), &valuesBuffer); CLHash_Utilities_HandleError(err, "intintLCGQuadraticOpenCompactCLHash_BufferInsert", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numEntries, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->insertSingleKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, NULL, "clEnqueueNDRangeKernel"); return (0); } int intintLCGQuadraticOpenCompactCLHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) { return intintLCGQuadraticOpenCompactCLHash_InsertNoOverwrite(table, 1, &key, &value); } int intintLCGQuadraticOpenCompactCLHash_InsertNoOverwrite(intintHash_Table * table, size_t numEntries, int *keys, int *values) { cl_int err; cl_mem keysBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, keys, &err); CLHash_Utilities_HandleError(err, "intintLCGQuadraticOpenCompactCLHash_InsertNoOverwrite", "clCreateBuffer"); cl_mem valuesBuffer = clCreateBuffer(table->context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(int) * numEntries, values, &err); CLHash_Utilities_HandleError(err, "intintLCGQuadraticOpenCompactCLHash_InsertNoOverwrite", "clCreateBuffer"); intintLCGQuadraticOpenCompactCLHash_BufferInsertNoOverwrite(table, numEntries, keysBuffer, valuesBuffer); clReleaseMemObject(keysBuffer); clReleaseMemObject(valuesBuffer); return HASH_EXIT_CODE_NORMAL; } int intintLCGQuadraticOpenCompactCLHash_BufferInsertNoOverwrite(intintHash_Table * table, size_t numEntries, cl_mem keysBuffer, cl_mem valuesBuffer) { cl_int err; err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 0, sizeof(cl_mem), &table->tableDataBuffer); CLHash_Utilities_HandleError(err, "intintLCGQuadraticOpenCompactCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); err = 
clSetKernelArg(table->insertSingleNoOverwriteKernel, 1, sizeof(unsigned int), &numEntries); CLHash_Utilities_HandleError(err, "intintLCGQuadraticOpenCompactCLHash_BufferInsertNoOverwrite", "ClSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 2, sizeof(cl_mem), &keysBuffer); CLHash_Utilities_HandleError(err, "intintLCGQuadraticOpenCompactCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); err = clSetKernelArg(table->insertSingleNoOverwriteKernel, 3, sizeof(cl_mem), &valuesBuffer); CLHash_Utilities_HandleError(err, "intintLCGQuadraticOpenCompactCLHash_BufferInsertNoOverwrite", "clSetKernelArg"); const size_t groupWorkSize = roundUpToNearest(numEntries, table->localWorkSize); err = clEnqueueNDRangeKernel(table->queue, table->insertSingleNoOverwriteKernel, 1, 0, &groupWorkSize, (const size_t *)&table->localWorkSize, 0, NULL, NULL); CLHash_Utilities_HandleError(err, "intintLCGQuadraticOpenCompactCLHash_BufferInsertNoOverwrite", "clEnqueueNDRangeKernel"); return (0); } typedef struct intintLCGQuadraticOpenCompactOpenMPHash_TableData { int hashID; unsigned int numBuckets; intintHash_CompressLCGData compressFuncData; } intintLCGQuadraticOpenCompactOpenMPHash_TableData; typedef struct intintLCGQuadraticOpenCompactOpenMPHash_Bucket { int key; int value; } intintLCGQuadraticOpenCompactOpenMPHash_Bucket; intintHash_Table *intintLCGQuadraticOpenCompactOpenMPHash_CreateTable(intintHash_Factory * factory, int hashIndex, size_t keyRange, size_t numEntries, float loadFactor) { intintHash_Table *table = (intintHash_Table *) malloc(sizeof(intintHash_Table)); table->destroyFunc = &intintLCGQuadraticOpenCompactOpenMPHash_DestroyTable; table->setupFunc = &intintLCGQuadraticOpenCompactOpenMPHash_SetupTable; table->emptyFunc = &intintLCGQuadraticOpenCompactOpenMPHash_EmptyTable; table->queryFunc = &intintLCGQuadraticOpenCompactOpenMPHash_Query; table->querySingleFunc = &intintLCGQuadraticOpenCompactOpenMPHash_QuerySingle; table->insertFunc = 
        &intintLCGQuadraticOpenCompactOpenMPHash_Insert;
    table->insertSingleFunc =
        &intintLCGQuadraticOpenCompactOpenMPHash_InsertSingle;
    table->insertNoOverwriteFunc =
        &intintLCGQuadraticOpenCompactOpenMPHash_InsertNoOverwrite;
    table->insertSingleNoOverwriteFunc =
        &intintLCGQuadraticOpenCompactOpenMPHash_InsertSingleNoOverwrite;
    table->tableData =
        (char *)
        malloc(sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData));
    ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *) table->
     tableData)->hashID = LCG_QUADRATIC_OPEN_COMPACT_OPENMP_HASH_ID;
    /* Bucket count = entries / load factor, then reduced to a Proth prime so
       the quadratic probe sequence covers every bucket. */
    ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *) table->
     tableData)->numBuckets = (unsigned int)((double)numEntries / loadFactor);
    ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *) table->
     tableData)->compressFuncData.a = HASH_LCG_A;
    ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *) table->
     tableData)->compressFuncData.c = HASH_LCG_C;
    ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *) table->
     tableData)->compressFuncData.m = HASH_LCG_M;
    ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *) table->
     tableData)->compressFuncData.n =
        ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
         table->tableData)->numBuckets;
    ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *) table->
     tableData)->numBuckets =
        largestProthPrimeUnder(((intintLCGQuadraticOpenCompactOpenMPHash_TableData *) table->tableData)->numBuckets);
    /* Grow the allocation so the bucket array follows the header. */
    char *tempHashData =
        (char *)
        malloc(sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData) +
               ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                table->tableData)->numBuckets *
               sizeof(intintLCGQuadraticOpenCompactOpenMPHash_Bucket));
    memcpy(tempHashData, table->tableData,
           sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData));
    free(table->tableData);
    table->tableData = tempHashData;
    return table;
}

/* Register the OpenMP implementation's hooks in the factory (no kernels). */
int intintLCGQuadraticOpenCompactOpenMPHash_CreateFactory(intintHash_Factory *
                                                          factory,
                                                          int hashIndex) {
    factory->createFunc[hashIndex] =
        &intintLCGQuadraticOpenCompactOpenMPHash_CreateTable;
    factory->destroyFunc[hashIndex] =
        &intintLCGQuadraticOpenCompactOpenMPHash_DestroyFactory;;
    return HASH_EXIT_CODE_NORMAL;
}

/* Nothing to release for the OpenMP variant. */
int intintLCGQuadraticOpenCompactOpenMPHash_DestroyFactory(intintHash_Factory *
                                                           factory,
                                                           int hashIndex) {;
    return HASH_EXIT_CODE_NORMAL;
}

/* Free the host allocations owned by the table. */
int intintLCGQuadraticOpenCompactOpenMPHash_DestroyTable(intintHash_Table *
                                                         table) {
    int exitCode = 0;
    free(table->tableData);
    free(table);
    return exitCode;
}

/*
 * Initialize the table: clear all buckets in parallel.  The sentinel-type
 * check skips the clear for perfect-hash table types.
 */
int intintLCGQuadraticOpenCompactOpenMPHash_SetupTable(intintHash_Table * table) {
    int exitCode = 0;
    intintLCGQuadraticOpenCompactOpenMPHash_Bucket *buckets =
        (intintLCGQuadraticOpenCompactOpenMPHash_Bucket *) & table->
        tableData[sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData)];
    if (intintHash_GetTableType(table) & ~HASH_SENTINEL_PERFECT_HASHES) {
#pragma omp parallel for
        for (int index = 0;
             index <
             ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
              table->tableData)->numBuckets; index++) {
            buckets[index].key = HASH_BUCKET_STATUS_EMPTY;
        }}
    exitCode = HASH_EXIT_CODE_NORMAL;
    return exitCode;
}

/* Reset the table: unconditionally mark every bucket empty, in parallel. */
int intintLCGQuadraticOpenCompactOpenMPHash_EmptyTable(intintHash_Table * table) {
    int exitCode = 0;
    intintLCGQuadraticOpenCompactOpenMPHash_Bucket *buckets =
        (intintLCGQuadraticOpenCompactOpenMPHash_Bucket *) & table->
        tableData[sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData)];
#pragma omp parallel for
    for (int index = 0;
         index <
         ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
          table->tableData)->numBuckets; index++) {
        buckets[index].key = HASH_BUCKET_STATUS_EMPTY;
    }
    exitCode = HASH_EXIT_CODE_NORMAL;
    return exitCode;
}

/* Single-key lookup against the raw tableData blob (continues). */
int intintLCGQuadraticOpenCompactOpenMPHash_InnerQuerySingle(char *tableData,
                                                             int key,
                                                             int *valueOutput) {
    intintLCGQuadraticOpenCompactOpenMPHash_Bucket *buckets =
        (intintLCGQuadraticOpenCompactOpenMPHash_Bucket *) &
        tableData[sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData)];
    int index;
    int exitCode;
    intintLCGQuadraticOpenCompactOpenMPHash_TableData *mytableData =
        (intintLCGQuadraticOpenCompactOpenMPHash_TableData *) tableData;
    intintHash_CompressLCGData compressFuncData =
        mytableData->compressFuncData;
    /* Compress the key into a starting slot via the LCG, then probe
       quadratically: slot = (iteration^2 + c) mod numBuckets. */
    unsigned int c = intintHash_CompressLCG(compressFuncData, key);
    unsigned long int iteration = 0;
    for (;;) {
        index =
            ((1 * iteration * iteration + 0 * iteration + c) %
             ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
              tableData)->numBuckets);
        /* NOTE(review): this QUERY path uses a CAS that writes the probed key
           into an empty bucket (claiming it with an unset value) — looks like
           shared generated probe code; confirm this is intended for reads. */
        int old_key =
            __sync_val_compare_and_swap(&buckets[index].key, -1, key);
        if (old_key == HASH_BUCKET_STATUS_EMPTY) {
            exitCode = HASH_SEARCH_CODE_EMPTY;
            break;
        } else if (old_key == key) {
            exitCode = HASH_SEARCH_CODE_MATCH;
            break;
        } else
            if ((iteration >
                 ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                  tableData)->numBuckets)) {
            exitCode = HASH_EXIT_CODE_CYCLE;
            break;
        }
        iteration++;
    }
    switch (exitCode) {
    case HASH_SEARCH_CODE_MATCH:
        *valueOutput = buckets[index].value;
        return HASH_EXIT_CODE_NORMAL;
    case HASH_SEARCH_CODE_MISMATCH:
    case HASH_SEARCH_CODE_EMPTY:
        return HASH_EXIT_CODE_KEY_DNE;  /* key does not exist */
    default:
        return exitCode;  /* error (e.g. cycle) */
    }
}

/* Bulk lookup: serial loop over keys; first failure is remembered but the
   remaining keys are still processed. */
int intintLCGQuadraticOpenCompactOpenMPHash_InnerQuery(char *tableData,
                                                       unsigned int numKeys,
                                                       int *keys,
                                                       int *valuesOutput) {
    intintLCGQuadraticOpenCompactOpenMPHash_Bucket *buckets =
        (intintLCGQuadraticOpenCompactOpenMPHash_Bucket *) &
        tableData[sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData)];
    int key;
    int *valueOutput;
    int index;
    int exitCode;
    uint i;
    int resultExitCode = HASH_EXIT_CODE_NORMAL;
    for (i = 0; i < numKeys; i++) {
        key = keys[i];
        valueOutput = &valuesOutput[i];
        intintLCGQuadraticOpenCompactOpenMPHash_TableData *mytableData =
            (intintLCGQuadraticOpenCompactOpenMPHash_TableData *) tableData;
        intintHash_CompressLCGData compressFuncData =
            mytableData->compressFuncData;
        unsigned int c = intintHash_CompressLCG(compressFuncData, key);
        unsigned long int iteration = 0;
        for (;;) {
            index =
                ((1 * iteration * iteration + 0 * iteration + c) %
                 ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                  tableData)->numBuckets);
            /* Same query-side CAS caveat as InnerQuerySingle above. */
            int old_key =
                __sync_val_compare_and_swap(&buckets[index].key, -1, key);
            if (old_key == HASH_BUCKET_STATUS_EMPTY) {
                exitCode = HASH_SEARCH_CODE_EMPTY;
                break;
            } else if (old_key == key) {
                exitCode = HASH_SEARCH_CODE_MATCH;
                break;
            } else
                if ((iteration >
                     ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                      tableData)->numBuckets)) {
                exitCode = HASH_EXIT_CODE_CYCLE;
                break;
            }
            iteration++;
        }
        switch (exitCode) {
        case HASH_SEARCH_CODE_MATCH:
            *valueOutput = buckets[index].value;
            break;
        case HASH_SEARCH_CODE_MISMATCH:
        case HASH_SEARCH_CODE_EMPTY:
            resultExitCode = HASH_EXIT_CODE_KEY_DNE;
            break;
        default:
            return exitCode;  /* hard error aborts the whole batch */
        }
    }
    return resultExitCode;
}

/* Insert one pair with overwrite: CAS-claim or find the key's bucket, then
   store the value. */
int intintLCGQuadraticOpenCompactOpenMPHash_InnerInsertSingle(char *tableData,
                                                              int key,
                                                              int value) {
    intintLCGQuadraticOpenCompactOpenMPHash_Bucket *buckets =
        (intintLCGQuadraticOpenCompactOpenMPHash_Bucket *) &
        tableData[sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData)];
    int index;
    int exitCode;
    intintLCGQuadraticOpenCompactOpenMPHash_TableData *mytableData =
        (intintLCGQuadraticOpenCompactOpenMPHash_TableData *) tableData;
    intintHash_CompressLCGData compressFuncData =
        mytableData->compressFuncData;
    unsigned int c = intintHash_CompressLCG(compressFuncData, key);
    unsigned long int iteration = 0;
    for (;;) {
        index =
            ((1 * iteration * iteration + 0 * iteration + c) %
             ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
              tableData)->numBuckets);
        /* Atomically claim an empty bucket (-1) with our key. */
        int old_key =
            __sync_val_compare_and_swap(&buckets[index].key, -1, key);
        if (old_key == HASH_BUCKET_STATUS_EMPTY) {
            exitCode = HASH_SEARCH_CODE_EMPTY;
            break;
        } else if (old_key == key) {
            exitCode = HASH_SEARCH_CODE_MATCH;
            break;
        } else
            if ((iteration >
                 ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                  tableData)->numBuckets)) {
            exitCode = HASH_EXIT_CODE_CYCLE;
            break;
        }
        iteration++;
    }
    switch (exitCode) {
    case HASH_SEARCH_CODE_MATCH:
    case HASH_SEARCH_CODE_MISMATCH:
        buckets[index].value = value;   /* overwrite existing entry */
        return HASH_EXIT_CODE_OVERWRITE;
    case HASH_SEARCH_CODE_EMPTY:
        buckets[index].value = value;   /* fresh entry */
        return
            HASH_EXIT_CODE_NORMAL;
    default:
        return exitCode;
    }
}

/*
 * Bulk insert with overwrite, parallelized with OpenMP.
 * NOTE(review): key, index, exitCode, and loop counter i are declared outside
 * the parallel loop with no private() clause, so they are shared across
 * threads — looks like a data race; confirm against the generator.
 */
int intintLCGQuadraticOpenCompactOpenMPHash_InnerInsert(char *tableData,
                                                        unsigned int
                                                        numEntries, int *keys,
                                                        int *values) {
    intintLCGQuadraticOpenCompactOpenMPHash_Bucket *buckets =
        (intintLCGQuadraticOpenCompactOpenMPHash_Bucket *) &
        tableData[sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData)];
    int resultExitCode = HASH_EXIT_CODE_NORMAL;
    int key;
    int index;
    int exitCode;
    uint i;
#pragma omp parallel for
    for (i = 0; i < numEntries; i++) {
        key = keys[i];
        intintLCGQuadraticOpenCompactOpenMPHash_TableData *mytableData =
            (intintLCGQuadraticOpenCompactOpenMPHash_TableData *) tableData;
        intintHash_CompressLCGData compressFuncData =
            mytableData->compressFuncData;
        unsigned int c = intintHash_CompressLCG(compressFuncData, key);
        unsigned long int iteration = 0;
        for (;;) {
            index =
                ((1 * iteration * iteration + 0 * iteration + c) %
                 ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                  tableData)->numBuckets);
            int old_key =
                __sync_val_compare_and_swap(&buckets[index].key, -1, key);
            if (old_key == HASH_BUCKET_STATUS_EMPTY) {
                exitCode = HASH_SEARCH_CODE_EMPTY;
                break;
            } else if (old_key == key) {
                exitCode = HASH_SEARCH_CODE_MATCH;
                break;
            } else
                if ((iteration >
                     ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                      tableData)->numBuckets)) {
                exitCode = HASH_EXIT_CODE_CYCLE;
                break;
            }
            iteration++;
        }
        switch (exitCode) {
        case HASH_SEARCH_CODE_MATCH:
        case HASH_SEARCH_CODE_MISMATCH:
            resultExitCode = HASH_EXIT_CODE_OVERWRITE;
            /* deliberate fall-through: overwrite writes the value too */
        case HASH_SEARCH_CODE_EMPTY:
            buckets[index].value = values[i];
            break;
        default:
            resultExitCode = exitCode;
        }
    }
    return resultExitCode;
}

/* Insert one pair only if the key is absent (continues on next chunk line). */
int intintLCGQuadraticOpenCompactOpenMPHash_InnerInsertSingleNoOverwrite(char
                                                                         *tableData,
                                                                         int
                                                                         key,
                                                                         int
                                                                         value) {
    intintLCGQuadraticOpenCompactOpenMPHash_Bucket *buckets =
        (intintLCGQuadraticOpenCompactOpenMPHash_Bucket *) &
        tableData[sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData)];
    int index;
    int exitCode;
    intintLCGQuadraticOpenCompactOpenMPHash_TableData
        *mytableData =
        (intintLCGQuadraticOpenCompactOpenMPHash_TableData *) tableData;
    intintHash_CompressLCGData compressFuncData =
        mytableData->compressFuncData;
    unsigned int c = intintHash_CompressLCG(compressFuncData, key);
    unsigned long int iteration = 0;
    for (;;) {
        index =
            ((1 * iteration * iteration + 0 * iteration + c) %
             ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
              tableData)->numBuckets);
        /* Atomically claim an empty bucket (-1) with our key. */
        int old_key =
            __sync_val_compare_and_swap(&buckets[index].key, -1, key);
        if (old_key == HASH_BUCKET_STATUS_EMPTY) {
            exitCode = HASH_SEARCH_CODE_EMPTY;
            break;
        } else if (old_key == key) {
            exitCode = HASH_SEARCH_CODE_MATCH;
            break;
        } else
            if ((iteration >
                 ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                  tableData)->numBuckets)) {
            exitCode = HASH_EXIT_CODE_CYCLE;
            break;
        }
        iteration++;
    }
    switch (exitCode) {
    case HASH_SEARCH_CODE_MATCH:
    case HASH_SEARCH_CODE_MISMATCH:
        /* no-overwrite: existing value is kept, caller is told */
        return HASH_EXIT_CODE_OVERWRITE;
    case HASH_SEARCH_CODE_EMPTY:
        buckets[index].value = value;
        return HASH_EXIT_CODE_NORMAL;
    default:
        return exitCode;
    }
}

/*
 * Bulk no-overwrite insert, parallelized with OpenMP.
 * NOTE(review): same shared key/index/exitCode/i race caveat as InnerInsert.
 */
int intintLCGQuadraticOpenCompactOpenMPHash_InnerInsertNoOverwrite(char
                                                                   *tableData,
                                                                   unsigned int
                                                                   numEntries,
                                                                   int *keys,
                                                                   int *values) {
    intintLCGQuadraticOpenCompactOpenMPHash_Bucket *buckets =
        (intintLCGQuadraticOpenCompactOpenMPHash_Bucket *) &
        tableData[sizeof(intintLCGQuadraticOpenCompactOpenMPHash_TableData)];
    int resultExitCode = HASH_EXIT_CODE_NORMAL;
    int key;
    int index;
    int exitCode;
    uint i;
#pragma omp parallel for
    for (i = 0; i < numEntries; i++) {
        key = keys[i];
        intintLCGQuadraticOpenCompactOpenMPHash_TableData *mytableData =
            (intintLCGQuadraticOpenCompactOpenMPHash_TableData *) tableData;
        intintHash_CompressLCGData compressFuncData =
            mytableData->compressFuncData;
        unsigned int c = intintHash_CompressLCG(compressFuncData, key);
        unsigned long int iteration = 0;
        for (;;) {
            index =
                ((1 * iteration * iteration + 0 * iteration + c) %
                 ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                  tableData)->numBuckets);
            int old_key =
                __sync_val_compare_and_swap(&buckets[index].key, -1, key);
            if (old_key == HASH_BUCKET_STATUS_EMPTY) {
                exitCode = HASH_SEARCH_CODE_EMPTY;
                break;
            } else if (old_key == key) {
                exitCode = HASH_SEARCH_CODE_MATCH;
                break;
            } else
                if ((iteration >
                     ((intintLCGQuadraticOpenCompactOpenMPHash_TableData *)
                      tableData)->numBuckets)) {
                exitCode = HASH_EXIT_CODE_CYCLE;
                break;
            }
            iteration++;
        }
        switch (exitCode) {
        case HASH_SEARCH_CODE_MATCH:
        case HASH_SEARCH_CODE_MISMATCH:
            resultExitCode = HASH_EXIT_CODE_OVERWRITE;  /* key kept as-is */
            break;
        case HASH_SEARCH_CODE_EMPTY:
            buckets[index].value = values[i];
            break;
        default:
            resultExitCode = exitCode;
        }
    }
    return resultExitCode;
}

/* ---- OpenMP public entry points: thin wrappers over the Inner* funcs. ---- */

int intintLCGQuadraticOpenCompactOpenMPHash_QuerySingle(intintHash_Table *
                                                        table, int key,
                                                        int *valueOutput) {
    return intintLCGQuadraticOpenCompactOpenMPHash_InnerQuerySingle(table->
                                                                    tableData,
                                                                    key,
                                                                    valueOutput);
}

int intintLCGQuadraticOpenCompactOpenMPHash_Query(intintHash_Table * table,
                                                  size_t numKeys, int *keys,
                                                  int *valuesOutput) {
    return intintLCGQuadraticOpenCompactOpenMPHash_InnerQuery(table->
                                                              tableData,
                                                              numKeys, keys,
                                                              valuesOutput);
}

int intintLCGQuadraticOpenCompactOpenMPHash_InsertSingle(intintHash_Table *
                                                         table, int key,
                                                         int value) {
    return intintLCGQuadraticOpenCompactOpenMPHash_InnerInsertSingle(table->
                                                                     tableData,
                                                                     key,
                                                                     value);
}

int intintLCGQuadraticOpenCompactOpenMPHash_Insert(intintHash_Table * table,
                                                   size_t numEntries,
                                                   int *keys, int *values) {
    return intintLCGQuadraticOpenCompactOpenMPHash_InnerInsert(table->
                                                               tableData,
                                                               numEntries,
                                                               keys, values);
}

int intintLCGQuadraticOpenCompactOpenMPHash_InsertSingleNoOverwrite(intintHash_Table * table, int key, int value) {
    return intintLCGQuadraticOpenCompactOpenMPHash_InnerInsertSingleNoOverwrite
        (table->tableData, key, value);
}

int intintLCGQuadraticOpenCompactOpenMPHash_InsertNoOverwrite(intintHash_Table *
                                                              table,
                                                              size_t
                                                              numEntries,
                                                              int *keys,
                                                              int *values) {
    return intintLCGQuadraticOpenCompactOpenMPHash_InnerInsertNoOverwrite
        (table->tableData, numEntries, keys, values);
} const char *HashFactory_source = "\n" "/* Copyright (C) 1991-2012 Free Software Foundation, Inc.\n" " This file is part of the GNU C Library.\n" "\n" " The GNU C Library is free software; you can redistribute it and/or\n" " modify it under the terms of the GNU Lesser General Public\n" " License as published by the Free Software Foundation; either\n" " version 2.1 of the License, or (at your option) any later version.\n" "\n" " The GNU C Library is distributed in the hope that it will be useful,\n" " but WITHOUT ANY WARRANTY; without even the implied warranty of\n" " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n" " Lesser General Public License for more details.\n" "\n" " You should have received a copy of the GNU Lesser General Public\n" " License along with the GNU C Library; if not, see\n" " <http://www.gnu.org/licenses/>. */\n" "/* This header is separate from features.h so that the compiler can\n" " include it implicitly at the start of every compilation. It must\n" " not itself include <features.h> or any other header that includes\n" " <features.h> because the implicit include comes before any feature\n" " test macros that may be defined in a source file before it first\n" " explicitly includes a system header. GCC knows the name of this\n" " header in order to preinclude it. */\n" "/* We do support the IEC 559 math functionality, real and complex. */\n" "/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /\n" " Unicode 6.0. */\n" "/* We do not support C11 <threads.h>. */\n" "/* Copyright 2013-14. Los Alamos National Security, LLC. This material was produced\n" " * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National \n" " * Laboratory (LANL), which is operated by Los Alamos National Security, LLC\n" " * for the U.S. Department of Energy. The U.S. Government has rights to use,\n" " * reproduce, and distribute this software. 
NEITHER THE GOVERNMENT NOR LOS\n" " * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR\n" " * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified\n" " * to produce derivative works, such modified software should be clearly marked,\n" " * so as not to confuse it with the version available from LANL. \n" " *\n" " * Licensed under the Apache License, Version 2.0 (the ""License""); you may not\n" " * use this file except in compliance with the License. You may obtain a copy\n" " * of the License at \n" " *\n" " * http://www.apache.org/licenses/LICENSE-2.0\n" " *\n" " * Unless required by applicable law or agreed to in writing, software distributed\n" " * under the License is distributed on an ""AS IS"" BASIS, WITHOUT WARRANTIES OR\n" " * CONDITIONS OF ANY KIND, either express or implied. See the License for the\n" " * specific language governing permissions and limitations under the License.\n" " *\n" " * Under this license, it is required to include a reference to this work. We\n" " * request that each derivative work contain a reference to LANL Copyright \n" " * Disclosure C14043/LA-CC-14-003 so that this work's impact can be roughly\n" " * measured. 
In addition, it is requested that a modifier is included as in\n" " * the following example:\n" " *\n" " * //<Uses | improves on | modified from> LANL Copyright Disclosure C14043/LA-CC-14-003\n" " *\n" " * This is LANL Copyright Disclosure C14043/LA-CC-14-003\n" " */\n" "int intintIdentityPerfectCLHash_InsertSingle(__global char *tableData,\n" " int key, int value);\n" "int intintIdentityPerfectCLHash_InnerInsertSingle(__global char *tableData,\n" " int key, int value);\n" "int intintHash_InsertSingle(__global char *tableData, int key, int value);\n" "int intintIdentityPerfectCLHash_InnerQuery(__global char *tableData,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput);\n" "int intintIdentityPerfectCLHash_InnerQuerySingle(__global char *tableData,\n" " int key,\n" " __global int *valueOutput);\n" "int intintIdentityPerfectCLHash_InnerInsert(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintIdentityPerfectCLHash_InnerInsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value);\n" "int intintIdentityPerfectCLHash_InnerInsertNoOverwrite(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintIdentityPerfectCLHash_QuerySingle(__global char *tableData, int key,\n" " __global int *valueOutput);\n" "int intintIdentityPerfectCLHash_QuerySingle(__global char *tableData, int key,\n" " __global int *valueOutput);\n" "int intintIdentityPerfectCLHash_Query(__global char *tableData, size_t numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput);\n" "int intintIdentityPerfectCLHash_Insert(__global char *tableData,\n" " size_t numEntries, __global int *keys,\n" " __global int *values);\n" "int intintIdentityPerfectCLHash_InsertSingleNoOverwrite(__global char\n" " *tableData, int key,\n" " int value);\n" "int intintIdentityPerfectCLHash_InsertNoOverwrite(__global char 
*tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintIdentitySentinelPerfectCLHash_InnerInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numEntries,\n" " __global int\n" " *keys,\n" " __global int\n" " *values);\n" "int intintIdentitySentinelPerfectCLHash_InnerQuerySingle(__global char\n" " *tableData, int key,\n" " __global int\n" " *valueOutput);\n" "int intintIdentitySentinelPerfectCLHash_InnerQuery(__global char *tableData,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput);\n" "int intintIdentitySentinelPerfectCLHash_InnerInsertSingle(__global char\n" " *tableData, int key,\n" " int value);\n" "int intintIdentitySentinelPerfectCLHash_InnerInsert(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintIdentitySentinelPerfectCLHash_InnerInsertSingleNoOverwrite(__global\n" " char\n" " *tableData,\n" " int key,\n" " int value);\n" "int intintIdentitySentinelPerfectCLHash_QuerySingle(__global char *tableData,\n" " int key,\n" " __global int *valueOutput);\n" "int intintIdentitySentinelPerfectCLHash_Query(__global char *tableData,\n" " size_t numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput);\n" "int intintIdentitySentinelPerfectCLHash_InsertSingle(__global char *tableData,\n" " int key, int value);\n" "int intintIdentitySentinelPerfectCLHash_Insert(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintIdentitySentinelPerfectCLHash_InsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value);\n" "int intintIdentitySentinelPerfectCLHash_InsertNoOverwrite(__global char\n" " *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintLCGLinearOpenCompactCLHash_InnerQuerySingle(__global char *tableData,\n" " int key,\n" " __global int\n" " 
*valueOutput);\n" "int intintLCGLinearOpenCompactCLHash_QuerySingle(__global char *tableData,\n" " int key,\n" " __global int *valueOutput);\n" "int intintLCGLinearOpenCompactCLHash_Query(__global char *tableData,\n" " size_t numKeys, __global int *keys,\n" " __global int *valuesOutput);\n" "int intintLCGLinearOpenCompactCLHash_InsertSingle(__global char *tableData,\n" " int key, int value);\n" "int intintLCGLinearOpenCompactCLHash_Insert(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintLCGLinearOpenCompactCLHash_InsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value);\n" "int intintLCGLinearOpenCompactCLHash_InsertNoOverwrite(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintLCGLinearOpenCompactCLHash_InnerQuery(__global char *tableData,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput);\n" "int intintLCGLinearOpenCompactCLHash_InnerInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numEntries,\n" " __global int *keys,\n" " __global int\n" " *values);\n" "int intintLCGLinearOpenCompactCLHash_InnerInsertSingle(__global char *tableData,\n" " int key, int value);\n" "int intintLCGLinearOpenCompactCLHash_InnerInsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value);\n" "int intintLCGLinearOpenCompactCLHash_InnerInsert(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintLCGQuadraticOpenCompactCLHash_InnerQuerySingle(__global char\n" " *tableData, int key,\n" " __global int\n" " *valueOutput);\n" "int intintLCGQuadraticOpenCompactCLHash_InnerQuery(__global char *tableData,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput);\n" "int intintLCGQuadraticOpenCompactCLHash_InnerInsertSingle(__global char\n" " *tableData, int 
key,\n" " int value);\n" "int intintLCGQuadraticOpenCompactCLHash_InnerInsert(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintLCGQuadraticOpenCompactCLHash_InnerInsertSingleNoOverwrite(__global\n" " char\n" " *tableData,\n" " int key,\n" " int value);\n" "int intintLCGQuadraticOpenCompactCLHash_InnerInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numEntries,\n" " __global int\n" " *keys,\n" " __global int\n" " *values);\n" "int intintLCGQuadraticOpenCompactCLHash_QuerySingle(__global char *tableData,\n" " int key,\n" " __global int *valueOutput);\n" "int intintLCGQuadraticOpenCompactCLHash_Query(__global char *tableData,\n" " size_t numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput);\n" "int intintLCGQuadraticOpenCompactCLHash_InsertSingle(__global char *tableData,\n" " int key, int value);\n" "int intintLCGQuadraticOpenCompactCLHash_Insert(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintLCGQuadraticOpenCompactCLHash_InsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value);\n" "int intintLCGQuadraticOpenCompactCLHash_InsertNoOverwrite(__global char\n" " *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values);\n" "int intintHash_Query(__global char *tableData, unsigned int numKeys,\n" " __global int *keys, __global int *valuesOutput);\n" "int intintHash_QuerySingle(__global char *tableData, int key,\n" " __global int *valueOutput);\n" "int intintHash_Insert(__global char *tableData, unsigned int numEntries,\n" " __global int *keys, __global int *values);\n" "int intintHash_InsertNoOverwrite(__global char *tableData,\n" " unsigned int numEntries, __global int *keys,\n" " __global int *values);\n" "int intintHash_InsertSingleNoOverwrite(__global char *tableData, int key,\n" " int value);\n" "#define HASH_REPORT_NEVER /**/ 0\n" 
"#define HASH_REPORT_CYCLE /**/ 1\n" "#define HASH_REPORT_END /****/ 2\n" "//\n" "#define HASH_EXIT_CODE_NORMAL /****************/ -1\n" "#define HASH_EXIT_CODE_ERROR /*****************/ -2\n" "#define HASH_EXIT_CODE_OVERWRITE /*************/ -3\n" "#define HASH_EXIT_CODE_KEY_DNE /***************/ -4\n" "#define HASH_EXIT_CODE_CYCLE /*****************/ -5\n" "#define HASH_EXIT_CODE_MAX_ENTRIES_EXCEEDED /**/ -6\n" "#define HASH_EXIT_CODE_BUCKET_INDEX_OOB /******/ -7\n" "//\n" "#define HASH_SEARCH_CODE_MATCH /*****/ 0\n" "#define HASH_SEARCH_CODE_MISMATCH /**/ 1\n" "#define HASH_SEARCH_CODE_EMPTY /*****/ 2\n" "//\n" "#define IDENTITY_PERFECT_CL_HASH_ID /****************/ 16\n" "#define IDENTITY_SENTINEL_PERFECT_CL_HASH_ID /*******/ 32\n" "#define LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID /*********/ 64\n" "#define LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID /******/ 128\n" "//\n" "#define HASH_BUCKET_STATUS_EMPTY /**/ -1\n" "#define HASH_BUCKET_STATUS_FULL /***/ -2\n" "#define HASH_BUCKET_STATUS_LOCK /***/ -3\n" "static inline unsigned int intintHash_CompressIdentity(char data, int hashCode) {\n" " return hashCode;\n" "}\n" "\n" "typedef struct intintHash_CompressLCGData {\n" " long unsigned int a;\n" " long unsigned int c;\n" " unsigned int m;\n" " unsigned int n;\n" "} intintHash_CompressLCGData;\n" "static inline unsigned int intintHash_CompressLCG(intintHash_CompressLCGData\n" " compressLCGData,\n" " int hashCode) {\n" " return ((compressLCGData.a * hashCode +\n" " compressLCGData.c) % compressLCGData.m) % compressLCGData.n;\n" "}\n" "\n" "typedef struct intintIdentityPerfectCLHash_TableData {\n" " int hashID;\n" " unsigned int numBuckets;\n" " char compressFuncData;\n" "} intintIdentityPerfectCLHash_TableData;\n" "typedef struct intintIdentityPerfectCLHash_Bucket {\n" " int key;\n" " int value;\n" "} intintIdentityPerfectCLHash_Bucket;\n" "int intintIdentityPerfectCLHash_InnerQuerySingle(__global char *tableData,\n" " int key,\n" " __global int *valueOutput) {\n" " __global 
intintIdentityPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentityPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentityPerfectCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentityPerfectCLHash_TableData\n" " *) tableData)->compressFuncData, key);\n" " if ((buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) {\n" " if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_MISMATCH;\n" " }\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " *valueOutput = buckets[index].value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " case HASH_SEARCH_CODE_EMPTY:\n" " return HASH_EXIT_CODE_KEY_DNE;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintIdentityPerfectCLHash_InnerQuery(__global char *tableData,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput) {\n" " __global intintIdentityPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentityPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentityPerfectCLHash_TableData)];\n" " int key;\n" " __global int *valueOutput;\n" " int index;\n" " int exitCode;\n" " uint i;\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " for (i = 0; i < numKeys; i++) {\n" " key = keys[i];\n" " valueOutput = &valuesOutput[i];\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentityPerfectCLHash_TableData\n" " *) tableData)->\n" " compressFuncData, key);\n" " if ((buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) {\n" " if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_MISMATCH;\n" " }\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " *valueOutput = buckets[index].value;\n" " 
break;\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " case HASH_SEARCH_CODE_EMPTY:\n" " resultExitCode = HASH_EXIT_CODE_KEY_DNE;\n" " break;\n" " default:\n" " return exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintIdentityPerfectCLHash_InnerInsertSingle(__global char *tableData,\n" " int key, int value) {\n" " __global intintIdentityPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentityPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentityPerfectCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentityPerfectCLHash_TableData\n" " *) tableData)->compressFuncData, key);\n" " if (((buckets[index].key ==\n" " HASH_BUCKET_STATUS_EMPTY) ? (buckets[index].key =\n" " key,\n" " HASH_BUCKET_STATUS_EMPTY) :\n" " buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) {\n" " if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_MISMATCH;\n" " }\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintIdentityPerfectCLHash_InnerInsert(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " __global intintIdentityPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentityPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentityPerfectCLHash_TableData)];\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " int key;\n" " int index;\n" " int exitCode;\n" " uint i;;\n" " for (i = 0; i < numEntries; i++) {\n" " key = keys[i];\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " 
intintIdentityPerfectCLHash_TableData\n" " *) tableData)->\n" " compressFuncData, key);\n" " if (((buckets[index].key ==\n" " HASH_BUCKET_STATUS_EMPTY) ? (buckets[index].key =\n" " key,\n" " HASH_BUCKET_STATUS_EMPTY) :\n" " buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) {\n" " if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_MISMATCH;\n" " }\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " resultExitCode = HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = values[i];\n" " break;\n" " default:\n" " resultExitCode = exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintIdentityPerfectCLHash_InnerInsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value) {\n" " __global intintIdentityPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentityPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentityPerfectCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentityPerfectCLHash_TableData\n" " *) tableData)->compressFuncData, key);\n" " if (((buckets[index].key ==\n" " HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key =\n" " key,\n" " HASH_BUCKET_STATUS_EMPTY) :\n" " buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) {\n" " if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_MISMATCH;\n" " }\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " return HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintIdentityPerfectCLHash_InnerInsertNoOverwrite(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " __global intintIdentityPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentityPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentityPerfectCLHash_TableData)];\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " int key;\n" " int index;\n" " int exitCode;\n" " uint i;;\n" " for (i = 0; i < numEntries; i++) {\n" " key = keys[i];\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentityPerfectCLHash_TableData\n" " *) tableData)->\n" " compressFuncData, key);\n" " if (((buckets[index].key ==\n" " HASH_BUCKET_STATUS_EMPTY) ? 
(buckets[index].key =\n" " key,\n" " HASH_BUCKET_STATUS_EMPTY) :\n" " buckets[index].key) != HASH_BUCKET_STATUS_EMPTY) {\n" " if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_MISMATCH;\n" " }\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " resultExitCode = HASH_EXIT_CODE_OVERWRITE;\n" " break;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = values[i];\n" " break;\n" " default:\n" " resultExitCode = exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintIdentityPerfectCLHash_QuerySingle(__global char *tableData, int key,\n" " __global int *valueOutput) {\n" " return intintIdentityPerfectCLHash_InnerQuerySingle(tableData, key,\n" " valueOutput);\n" "}\n" "int intintIdentityPerfectCLHash_Query(__global char *tableData, size_t numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput) {\n" " return intintIdentityPerfectCLHash_InnerQuery(tableData, numKeys, keys,\n" " valuesOutput);\n" "}\n" "int intintIdentityPerfectCLHash_InsertSingle(__global char *tableData, int key,\n" " int value) {\n" " return intintIdentityPerfectCLHash_InnerInsertSingle(tableData, key,\n" " value);\n" "}\n" "int intintIdentityPerfectCLHash_Insert(__global char *tableData,\n" " size_t numEntries, __global int *keys,\n" " __global int *values) {\n" " return intintIdentityPerfectCLHash_InnerInsert(tableData, numEntries,\n" " keys, values);\n" "}\n" "int intintIdentityPerfectCLHash_InsertSingleNoOverwrite(__global char\n" " *tableData, int key,\n" " int value) {\n" " return\n" " intintIdentityPerfectCLHash_InnerInsertSingleNoOverwrite(tableData,\n" " key,\n" " value);\n" "}\n" "int intintIdentityPerfectCLHash_InsertNoOverwrite(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " return 
intintIdentityPerfectCLHash_InnerInsertNoOverwrite(tableData,\n" " numEntries,\n" " keys, values);\n" "}\n" "__kernel void intintIdentityPerfectCLHash_RangeQuerySingle(__global char\n" " *tableData,\n" " unsigned int\n" " numQueries,\n" " __global int *keys,\n" " __global int\n" " *valuesOutput) {\n" " uint i = get_global_id(0);\n" " if (i >= numQueries) {\n" " return;\n" " }\n" " intintIdentityPerfectCLHash_InnerQuerySingle(tableData, keys[i],\n" " valuesOutput + i);\n" "}\n" "__kernel void intintIdentityPerfectCLHash_RangeQuery(__global char *tableData,\n" " unsigned int numQueries,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int\n" " *valuesOutput) {\n" " uint i = get_global_id(0);\n" " if (i >= numQueries) {\n" " return;\n" " }\n" " intintIdentityPerfectCLHash_InnerQuery(tableData, numKeys,\n" " keys + (i * numKeys),\n" " valuesOutput + (i * numKeys));\n" "}\n" "__kernel void intintIdentityPerfectCLHash_RangeInsertSingle(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " __global int *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintIdentityPerfectCLHash_InnerInsertSingle(tableData, keys[i],\n" " values[i]);\n" "}\n" "__kernel void intintIdentityPerfectCLHash_RangeInsert(__global char *tableData,\n" " unsigned int\n" " numInsertions,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintIdentityPerfectCLHash_InnerInsert(tableData, numEntries,\n" " keys + (i * numEntries),\n" " values + (i * numEntries));\n" "}\n" "__kernel void intintIdentityPerfectCLHash_RangeInsertSingleNoOverwrite(__global\n" " char\n" " *tableData,\n" " unsigned\n" " int\n" " numInsertions,\n" " __global\n" " int\n" " *keys,\n" " __global\n" " int\n" " *values) \n" "{\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " 
return;\n" " }\n" " intintIdentityPerfectCLHash_InnerInsertSingleNoOverwrite(tableData,\n" " keys[i],\n" " values[i]);\n" "}\n" "__kernel void intintIdentityPerfectCLHash_RangeInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " unsigned int\n" " numEntries,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintIdentityPerfectCLHash_InnerInsertNoOverwrite(tableData,\n" " numEntries,\n" " keys +\n" " (i * numEntries),\n" " values +\n" " (i * numEntries));\n" "}\n" "\n" "typedef struct intintIdentitySentinelPerfectCLHash_TableData {\n" " int hashID;\n" " unsigned int numBuckets;\n" " char compressFuncData;\n" " int emptyValue;\n" "} intintIdentitySentinelPerfectCLHash_TableData;\n" "typedef struct intintIdentitySentinelPerfectCLHash_Bucket {\n" " int value;\n" "} intintIdentitySentinelPerfectCLHash_Bucket;\n" "int intintIdentitySentinelPerfectCLHash_InnerQuerySingle(__global char\n" " *tableData, int key,\n" " __global int\n" " *valueOutput) {\n" " __global intintIdentitySentinelPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentitySentinelPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentitySentinelPerfectCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentitySentinelPerfectCLHash_TableData\n" " *) tableData)->compressFuncData, key);\n" " if (buckets[index].value !=\n" " ((__global intintIdentitySentinelPerfectCLHash_TableData *)\n" " tableData)->emptyValue) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " *valueOutput = buckets[index].value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " case HASH_SEARCH_CODE_EMPTY:\n" " return HASH_EXIT_CODE_KEY_DNE;\n" " default:\n" " return exitCode;\n" 
" }\n" "}\n" "int intintIdentitySentinelPerfectCLHash_InnerQuery(__global char *tableData,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput) {\n" " __global intintIdentitySentinelPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentitySentinelPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentitySentinelPerfectCLHash_TableData)];\n" " int key;\n" " __global int *valueOutput;\n" " int index;\n" " int exitCode;\n" " uint i;\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " for (i = 0; i < numKeys; i++) {\n" " key = keys[i];\n" " valueOutput = &valuesOutput[i];\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentitySentinelPerfectCLHash_TableData\n" " *) tableData)->\n" " compressFuncData, key);\n" " if (buckets[index].value !=\n" " ((__global intintIdentitySentinelPerfectCLHash_TableData *)\n" " tableData)->emptyValue) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " *valueOutput = buckets[index].value;\n" " break;\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " case HASH_SEARCH_CODE_EMPTY:\n" " resultExitCode = HASH_EXIT_CODE_KEY_DNE;\n" " break;\n" " default:\n" " return exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintIdentitySentinelPerfectCLHash_InnerInsertSingle(__global char\n" " *tableData, int key,\n" " int value) {\n" " __global intintIdentitySentinelPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentitySentinelPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentitySentinelPerfectCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentitySentinelPerfectCLHash_TableData\n" " *) tableData)->compressFuncData, key);\n" " if (buckets[index].value !=\n" " ((__global intintIdentitySentinelPerfectCLHash_TableData *)\n" " tableData)->emptyValue) {\n" " exitCode = 
HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintIdentitySentinelPerfectCLHash_InnerInsert(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " __global intintIdentitySentinelPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentitySentinelPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentitySentinelPerfectCLHash_TableData)];\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " int key;\n" " int index;\n" " int exitCode;\n" " uint i;;\n" " for (i = 0; i < numEntries; i++) {\n" " key = keys[i];\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentitySentinelPerfectCLHash_TableData\n" " *) tableData)->\n" " compressFuncData, key);\n" " if (buckets[index].value !=\n" " ((__global intintIdentitySentinelPerfectCLHash_TableData *)\n" " tableData)->emptyValue) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " resultExitCode = HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = values[i];\n" " break;\n" " default:\n" " resultExitCode = exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintIdentitySentinelPerfectCLHash_InnerInsertSingleNoOverwrite(__global\n" " char\n" " *tableData,\n" " int key,\n" " int value) \n" "{\n" " __global intintIdentitySentinelPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentitySentinelPerfectCLHash_Bucket *) &\n" " 
tableData[sizeof(intintIdentitySentinelPerfectCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentitySentinelPerfectCLHash_TableData\n" " *) tableData)->compressFuncData, key);\n" " if (buckets[index].value !=\n" " ((__global intintIdentitySentinelPerfectCLHash_TableData *)\n" " tableData)->emptyValue) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " return HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintIdentitySentinelPerfectCLHash_InnerInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numEntries,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " __global intintIdentitySentinelPerfectCLHash_Bucket *buckets =\n" " (__global intintIdentitySentinelPerfectCLHash_Bucket *) &\n" " tableData[sizeof(intintIdentitySentinelPerfectCLHash_TableData)];\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " int key;\n" " int index;\n" " int exitCode;\n" " uint i;;\n" " for (i = 0; i < numEntries; i++) {\n" " key = keys[i];\n" " index =\n" " intintHash_CompressIdentity(((__global\n" " intintIdentitySentinelPerfectCLHash_TableData\n" " *) tableData)->\n" " compressFuncData, key);\n" " if (buckets[index].value !=\n" " ((__global intintIdentitySentinelPerfectCLHash_TableData *)\n" " tableData)->emptyValue) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " } else {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " resultExitCode = HASH_EXIT_CODE_OVERWRITE;\n" " break;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = values[i];\n" " break;\n" " default:\n" " 
resultExitCode = exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintIdentitySentinelPerfectCLHash_QuerySingle(__global char *tableData,\n" " int key,\n" " __global int *valueOutput) {\n" " return intintIdentitySentinelPerfectCLHash_InnerQuerySingle(tableData,\n" " key,\n" " valueOutput);\n" "}\n" "int intintIdentitySentinelPerfectCLHash_Query(__global char *tableData,\n" " size_t numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput) {\n" " return intintIdentitySentinelPerfectCLHash_InnerQuery(tableData,\n" " numKeys, keys,\n" " valuesOutput);\n" "}\n" "int intintIdentitySentinelPerfectCLHash_InsertSingle(__global char *tableData,\n" " int key, int value) {\n" " return intintIdentitySentinelPerfectCLHash_InnerInsertSingle(tableData,\n" " key,\n" " value);\n" "}\n" "int intintIdentitySentinelPerfectCLHash_Insert(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " return intintIdentitySentinelPerfectCLHash_InnerInsert(tableData,\n" " numEntries, keys,\n" " values);\n" "}\n" "int intintIdentitySentinelPerfectCLHash_InsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value) {\n" " return\n" " intintIdentitySentinelPerfectCLHash_InnerInsertSingleNoOverwrite\n" " (tableData, key, value);\n" "}\n" "int intintIdentitySentinelPerfectCLHash_InsertNoOverwrite(__global char\n" " *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values) \n" "{\n" " return\n" " intintIdentitySentinelPerfectCLHash_InnerInsertNoOverwrite\n" " (tableData, numEntries, keys, values);\n" "}\n" "__kernel void intintIdentitySentinelPerfectCLHash_RangeQuerySingle(__global char\n" " *tableData,\n" " unsigned int\n" " numQueries,\n" " __global int\n" " *keys,\n" " __global int\n" " *valuesOutput) \n" "{\n" " uint i = get_global_id(0);\n" " if (i >= numQueries) {\n" " return;\n" " }\n" " intintIdentitySentinelPerfectCLHash_InnerQuerySingle(tableData, keys[i],\n" " 
valuesOutput + i);\n" "}\n" "__kernel void intintIdentitySentinelPerfectCLHash_RangeQuery(__global char\n" " *tableData,\n" " unsigned int\n" " numQueries,\n" " unsigned int\n" " numKeys,\n" " __global int *keys,\n" " __global int\n" " *valuesOutput) {\n" " uint i = get_global_id(0);\n" " if (i >= numQueries) {\n" " return;\n" " }\n" " intintIdentitySentinelPerfectCLHash_InnerQuery(tableData, numKeys,\n" " keys + (i * numKeys),\n" " valuesOutput +\n" " (i * numKeys));\n" "}\n" "__kernel void intintIdentitySentinelPerfectCLHash_RangeInsertSingle(__global\n" " char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintIdentitySentinelPerfectCLHash_InnerInsertSingle(tableData,\n" " keys[i],\n" " values[i]);\n" "}\n" "__kernel void intintIdentitySentinelPerfectCLHash_RangeInsert(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " unsigned int\n" " numEntries,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintIdentitySentinelPerfectCLHash_InnerInsert(tableData, numEntries,\n" " keys + (i * numEntries),\n" " values +\n" " (i * numEntries));\n" "}\n" "__kernel void\n" "intintIdentitySentinelPerfectCLHash_RangeInsertSingleNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintIdentitySentinelPerfectCLHash_InnerInsertSingleNoOverwrite\n" " (tableData, keys[i], values[i]);\n" "}\n" "__kernel void\n" "intintIdentitySentinelPerfectCLHash_RangeInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " unsigned int\n" " numEntries,\n" " __global int *keys,\n" " __global int\n" " 
*values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintIdentitySentinelPerfectCLHash_InnerInsertNoOverwrite(tableData,\n" " numEntries,\n" " keys +\n" " (i *\n" " numEntries),\n" " values +\n" " (i *\n" " numEntries));\n" "}\n" "\n" "typedef struct intintLCGLinearOpenCompactCLHash_TableData {\n" " int hashID;\n" " unsigned int numBuckets;\n" " intintHash_CompressLCGData compressFuncData;\n" "} intintLCGLinearOpenCompactCLHash_TableData;\n" "typedef struct intintLCGLinearOpenCompactCLHash_Bucket {\n" " int key;\n" " int value;\n" "} intintLCGLinearOpenCompactCLHash_Bucket;\n" "int intintLCGLinearOpenCompactCLHash_InnerQuerySingle(__global char *tableData,\n" " int key,\n" " __global int\n" " *valueOutput) {\n" " __global intintLCGLinearOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGLinearOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGLinearOpenCompactCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " __global intintLCGLinearOpenCompactCLHash_TableData *mytableData =\n" " (__global intintLCGLinearOpenCompactCLHash_TableData *) tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration +\n" " c) %\n" " ((__global intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else if ((index == c && iteration > 0)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " *valueOutput = buckets[index].value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " case 
HASH_SEARCH_CODE_MISMATCH:\n" " case HASH_SEARCH_CODE_EMPTY:\n" " return HASH_EXIT_CODE_KEY_DNE;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintLCGLinearOpenCompactCLHash_InnerQuery(__global char *tableData,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput) {\n" " __global intintLCGLinearOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGLinearOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGLinearOpenCompactCLHash_TableData)];\n" " int key;\n" " __global int *valueOutput;\n" " int index;\n" " int exitCode;\n" " uint i;\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " for (i = 0; i < numKeys; i++) {\n" " key = keys[i];\n" " valueOutput = &valuesOutput[i];\n" " __global intintLCGLinearOpenCompactCLHash_TableData *mytableData\n" " =\n" " (__global intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration +\n" " c) %\n" " ((__global\n" " intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else if ((index == c && iteration > 0)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " *valueOutput = buckets[index].value;\n" " break;\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " case HASH_SEARCH_CODE_EMPTY:\n" " resultExitCode = HASH_EXIT_CODE_KEY_DNE;\n" " break;\n" " default:\n" " return exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintLCGLinearOpenCompactCLHash_InnerInsertSingle(__global char 
*tableData,\n" " int key, int value) {\n" " __global intintLCGLinearOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGLinearOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGLinearOpenCompactCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " __global intintLCGLinearOpenCompactCLHash_TableData *mytableData =\n" " (__global intintLCGLinearOpenCompactCLHash_TableData *) tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration +\n" " c) %\n" " ((__global intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((atomic_cmpxchg\n" " (&(buckets[index].key), HASH_BUCKET_STATUS_EMPTY,\n" " key)) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else if ((index == c && iteration > 0)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintLCGLinearOpenCompactCLHash_InnerInsert(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " __global intintLCGLinearOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGLinearOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGLinearOpenCompactCLHash_TableData)];\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " int key;\n" " int index;\n" " int exitCode;\n" " uint i;;\n" " for (i = 0; i < numEntries; i++) {\n" " key 
= keys[i];\n" " __global intintLCGLinearOpenCompactCLHash_TableData *mytableData\n" " =\n" " (__global intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration +\n" " c) %\n" " ((__global\n" " intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((atomic_cmpxchg\n" " (&(buckets[index].key), HASH_BUCKET_STATUS_EMPTY,\n" " key)) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else if ((index == c && iteration > 0)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " resultExitCode = HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = values[i];\n" " break;\n" " default:\n" " resultExitCode = exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintLCGLinearOpenCompactCLHash_InnerInsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value) {\n" " __global intintLCGLinearOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGLinearOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGLinearOpenCompactCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " __global intintLCGLinearOpenCompactCLHash_TableData *mytableData =\n" " (__global intintLCGLinearOpenCompactCLHash_TableData *) tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration +\n" " 
c) %\n" " ((__global intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((atomic_cmpxchg\n" " (&(buckets[index].key), HASH_BUCKET_STATUS_EMPTY,\n" " key)) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else if ((index == c && iteration > 0)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " return HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintLCGLinearOpenCompactCLHash_InnerInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numEntries,\n" " __global int *keys,\n" " __global int\n" " *values) {\n" " __global intintLCGLinearOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGLinearOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGLinearOpenCompactCLHash_TableData)];\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " int key;\n" " int index;\n" " int exitCode;\n" " uint i;;\n" " for (i = 0; i < numEntries; i++) {\n" " key = keys[i];\n" " __global intintLCGLinearOpenCompactCLHash_TableData *mytableData\n" " =\n" " (__global intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration +\n" " c) %\n" " ((__global\n" " intintLCGLinearOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((atomic_cmpxchg\n" " (&(buckets[index].key), HASH_BUCKET_STATUS_EMPTY,\n" " key)) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = 
HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else if ((index == c && iteration > 0)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " resultExitCode = HASH_EXIT_CODE_OVERWRITE;\n" " break;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = values[i];\n" " break;\n" " default:\n" " resultExitCode = exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintLCGLinearOpenCompactCLHash_QuerySingle(__global char *tableData,\n" " int key,\n" " __global int *valueOutput) {\n" " return intintLCGLinearOpenCompactCLHash_InnerQuerySingle(tableData, key,\n" " valueOutput);\n" "}\n" "int intintLCGLinearOpenCompactCLHash_Query(__global char *tableData,\n" " size_t numKeys, __global int *keys,\n" " __global int *valuesOutput) {\n" " return intintLCGLinearOpenCompactCLHash_InnerQuery(tableData, numKeys,\n" " keys, valuesOutput);\n" "}\n" "int intintLCGLinearOpenCompactCLHash_InsertSingle(__global char *tableData,\n" " int key, int value) {\n" " return intintLCGLinearOpenCompactCLHash_InnerInsertSingle(tableData,\n" " key, value);\n" "}\n" "int intintLCGLinearOpenCompactCLHash_Insert(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " return intintLCGLinearOpenCompactCLHash_InnerInsert(tableData,\n" " numEntries, keys,\n" " values);\n" "}\n" "int intintLCGLinearOpenCompactCLHash_InsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value) {\n" " return\n" " intintLCGLinearOpenCompactCLHash_InnerInsertSingleNoOverwrite\n" " (tableData, key, value);\n" "}\n" "int intintLCGLinearOpenCompactCLHash_InsertNoOverwrite(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " return\n" " 
intintLCGLinearOpenCompactCLHash_InnerInsertNoOverwrite(tableData,\n" " numEntries,\n" " keys,\n" " values);\n" "}\n" "__kernel void intintLCGLinearOpenCompactCLHash_RangeQuerySingle(__global char\n" " *tableData,\n" " unsigned int\n" " numQueries,\n" " __global int\n" " *keys,\n" " __global int\n" " *valuesOutput) {\n" " uint i = get_global_id(0);\n" " if (i >= numQueries) {\n" " return;\n" " }\n" " intintLCGLinearOpenCompactCLHash_InnerQuerySingle(tableData, keys[i],\n" " valuesOutput + i);\n" "}\n" "__kernel void intintLCGLinearOpenCompactCLHash_RangeQuery(__global char\n" " *tableData,\n" " unsigned int\n" " numQueries,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int\n" " *valuesOutput) {\n" " uint i = get_global_id(0);\n" " if (i >= numQueries) {\n" " return;\n" " }\n" " intintLCGLinearOpenCompactCLHash_InnerQuery(tableData, numKeys,\n" " keys + (i * numKeys),\n" " valuesOutput +\n" " (i * numKeys));\n" "}\n" "__kernel void intintLCGLinearOpenCompactCLHash_RangeInsertSingle(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintLCGLinearOpenCompactCLHash_InnerInsertSingle(tableData, keys[i],\n" " values[i]);\n" "}\n" "__kernel void intintLCGLinearOpenCompactCLHash_RangeInsert(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " unsigned int\n" " numEntries,\n" " __global int *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintLCGLinearOpenCompactCLHash_InnerInsert(tableData, numEntries,\n" " keys + (i * numEntries),\n" " values + (i * numEntries));\n" "}\n" "__kernel void\n" "intintLCGLinearOpenCompactCLHash_RangeInsertSingleNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) 
{\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintLCGLinearOpenCompactCLHash_InnerInsertSingleNoOverwrite(tableData,\n" " keys[i],\n" " values\n" " [i]);\n" "}\n" "__kernel void intintLCGLinearOpenCompactCLHash_RangeInsertNoOverwrite(__global\n" " char\n" " *tableData,\n" " unsigned\n" " int\n" " numInsertions,\n" " unsigned\n" " int\n" " numEntries,\n" " __global\n" " int *keys,\n" " __global\n" " int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintLCGLinearOpenCompactCLHash_InnerInsertNoOverwrite(tableData,\n" " numEntries,\n" " keys +\n" " (i *\n" " numEntries),\n" " values +\n" " (i *\n" " numEntries));\n" "}\n" "\n" "typedef struct intintLCGQuadraticOpenCompactCLHash_TableData {\n" " int hashID;\n" " unsigned int numBuckets;\n" " intintHash_CompressLCGData compressFuncData;\n" "} intintLCGQuadraticOpenCompactCLHash_TableData;\n" "typedef struct intintLCGQuadraticOpenCompactCLHash_Bucket {\n" " int key;\n" " int value;\n" "} intintLCGQuadraticOpenCompactCLHash_Bucket;\n" "int intintLCGQuadraticOpenCompactCLHash_InnerQuerySingle(__global char\n" " *tableData, int key,\n" " __global int\n" " *valueOutput) {\n" " __global intintLCGQuadraticOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGQuadraticOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGQuadraticOpenCompactCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " __global intintLCGQuadraticOpenCompactCLHash_TableData *mytableData =\n" " (__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration * iteration + 0 * iteration +\n" " c) %\n" " ((__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " 
tableData)->numBuckets);\n" " if ((buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else\n" " if ((iteration >\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " *valueOutput = buckets[index].value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " case HASH_SEARCH_CODE_EMPTY:\n" " return HASH_EXIT_CODE_KEY_DNE;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_InnerQuery(__global char *tableData,\n" " unsigned int numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput) {\n" " __global intintLCGQuadraticOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGQuadraticOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGQuadraticOpenCompactCLHash_TableData)];\n" " int key;\n" " __global int *valueOutput;\n" " int index;\n" " int exitCode;\n" " uint i;\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " for (i = 0; i < numKeys; i++) {\n" " key = keys[i];\n" " valueOutput = &valuesOutput[i];\n" " __global intintLCGQuadraticOpenCompactCLHash_TableData\n" " *mytableData =\n" " (__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration * iteration + 0 * iteration +\n" " c) %\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((buckets[index].key) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" 
" break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else\n" " if ((iteration >\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData\n" " *) tableData)->numBuckets)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " *valueOutput = buckets[index].value;\n" " break;\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " case HASH_SEARCH_CODE_EMPTY:\n" " resultExitCode = HASH_EXIT_CODE_KEY_DNE;\n" " break;\n" " default:\n" " return exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_InnerInsertSingle(__global char\n" " *tableData, int key,\n" " int value) {\n" " __global intintLCGQuadraticOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGQuadraticOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGQuadraticOpenCompactCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " __global intintLCGQuadraticOpenCompactCLHash_TableData *mytableData =\n" " (__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration * iteration + 0 * iteration +\n" " c) %\n" " ((__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((atomic_cmpxchg\n" " (&(buckets[index].key), HASH_BUCKET_STATUS_EMPTY,\n" " key)) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else\n" " if ((iteration >\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " 
break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_InnerInsert(__global char *tableData,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " __global intintLCGQuadraticOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGQuadraticOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGQuadraticOpenCompactCLHash_TableData)];\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " int key;\n" " int index;\n" " int exitCode;\n" " uint i;;\n" " for (i = 0; i < numEntries; i++) {\n" " key = keys[i];\n" " __global intintLCGQuadraticOpenCompactCLHash_TableData\n" " *mytableData =\n" " (__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration * iteration + 0 * iteration +\n" " c) %\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((atomic_cmpxchg\n" " (&(buckets[index].key), HASH_BUCKET_STATUS_EMPTY,\n" " key)) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else\n" " if ((iteration >\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData\n" " *) tableData)->numBuckets)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " 
case HASH_SEARCH_CODE_MISMATCH:\n" " resultExitCode = HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = values[i];\n" " break;\n" " default:\n" " resultExitCode = exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_InnerInsertSingleNoOverwrite(__global\n" " char\n" " *tableData,\n" " int key,\n" " int value) \n" "{\n" " __global intintLCGQuadraticOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGQuadraticOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGQuadraticOpenCompactCLHash_TableData)];\n" " int index;\n" " int exitCode;\n" " __global intintLCGQuadraticOpenCompactCLHash_TableData *mytableData =\n" " (__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration * iteration + 0 * iteration +\n" " c) %\n" " ((__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((atomic_cmpxchg\n" " (&(buckets[index].key), HASH_BUCKET_STATUS_EMPTY,\n" " key)) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else\n" " if ((iteration >\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " return HASH_EXIT_CODE_OVERWRITE;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = value;\n" " return HASH_EXIT_CODE_NORMAL;\n" " default:\n" " return exitCode;\n" " }\n" "}\n" "int 
intintLCGQuadraticOpenCompactCLHash_InnerInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numEntries,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " __global intintLCGQuadraticOpenCompactCLHash_Bucket *buckets =\n" " (__global intintLCGQuadraticOpenCompactCLHash_Bucket *) &\n" " tableData[sizeof(intintLCGQuadraticOpenCompactCLHash_TableData)];\n" " int resultExitCode = HASH_EXIT_CODE_NORMAL;\n" " int key;\n" " int index;\n" " int exitCode;\n" " uint i;;\n" " for (i = 0; i < numEntries; i++) {\n" " key = keys[i];\n" " __global intintLCGQuadraticOpenCompactCLHash_TableData\n" " *mytableData =\n" " (__global intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData;\n" " intintHash_CompressLCGData compressFuncData =\n" " mytableData->compressFuncData;\n" " unsigned int c = intintHash_CompressLCG(compressFuncData, key);\n" " unsigned long int iteration = 0;\n" " for (;;) {\n" " index =\n" " ((1 * iteration * iteration + 0 * iteration +\n" " c) %\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData *)\n" " tableData)->numBuckets);\n" " if ((atomic_cmpxchg\n" " (&(buckets[index].key), HASH_BUCKET_STATUS_EMPTY,\n" " key)) == HASH_BUCKET_STATUS_EMPTY) {\n" " exitCode = HASH_SEARCH_CODE_EMPTY;\n" " break;\n" " } else if (key == buckets[index].key) {\n" " exitCode = HASH_SEARCH_CODE_MATCH;\n" " break;\n" " } else\n" " if ((iteration >\n" " ((__global\n" " intintLCGQuadraticOpenCompactCLHash_TableData\n" " *) tableData)->numBuckets)) {\n" " exitCode = HASH_EXIT_CODE_CYCLE;\n" " break;\n" " }\n" " iteration++;\n" " }\n" " switch (exitCode) {\n" " case HASH_SEARCH_CODE_MATCH:\n" " case HASH_SEARCH_CODE_MISMATCH:\n" " resultExitCode = HASH_EXIT_CODE_OVERWRITE;\n" " break;\n" " case HASH_SEARCH_CODE_EMPTY:\n" " buckets[index].value = values[i];\n" " break;\n" " default:\n" " resultExitCode = exitCode;\n" " }\n" " }\n" " return resultExitCode;\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_QuerySingle(__global 
char *tableData,\n" " int key,\n" " __global int *valueOutput) {\n" " return intintLCGQuadraticOpenCompactCLHash_InnerQuerySingle(tableData,\n" " key,\n" " valueOutput);\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_Query(__global char *tableData,\n" " size_t numKeys,\n" " __global int *keys,\n" " __global int *valuesOutput) {\n" " return intintLCGQuadraticOpenCompactCLHash_InnerQuery(tableData,\n" " numKeys, keys,\n" " valuesOutput);\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_InsertSingle(__global char *tableData,\n" " int key, int value) {\n" " return intintLCGQuadraticOpenCompactCLHash_InnerInsertSingle(tableData,\n" " key,\n" " value);\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_Insert(__global char *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values) {\n" " return intintLCGQuadraticOpenCompactCLHash_InnerInsert(tableData,\n" " numEntries, keys,\n" " values);\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_InsertSingleNoOverwrite(__global char\n" " *tableData,\n" " int key,\n" " int value) {\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_InnerInsertSingleNoOverwrite\n" " (tableData, key, value);\n" "}\n" "int intintLCGQuadraticOpenCompactCLHash_InsertNoOverwrite(__global char\n" " *tableData,\n" " size_t numEntries,\n" " __global int *keys,\n" " __global int *values) \n" "{\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_InnerInsertNoOverwrite\n" " (tableData, numEntries, keys, values);\n" "}\n" "__kernel void intintLCGQuadraticOpenCompactCLHash_RangeQuerySingle(__global char\n" " *tableData,\n" " unsigned int\n" " numQueries,\n" " __global int\n" " *keys,\n" " __global int\n" " *valuesOutput) \n" "{\n" " uint i = get_global_id(0);\n" " if (i >= numQueries) {\n" " return;\n" " }\n" " intintLCGQuadraticOpenCompactCLHash_InnerQuerySingle(tableData, keys[i],\n" " valuesOutput + i);\n" "}\n" "__kernel void intintLCGQuadraticOpenCompactCLHash_RangeQuery(__global char\n" " *tableData,\n" " unsigned 
int\n" " numQueries,\n" " unsigned int\n" " numKeys,\n" " __global int *keys,\n" " __global int\n" " *valuesOutput) {\n" " uint i = get_global_id(0);\n" " if (i >= numQueries) {\n" " return;\n" " }\n" " intintLCGQuadraticOpenCompactCLHash_InnerQuery(tableData, numKeys,\n" " keys + (i * numKeys),\n" " valuesOutput +\n" " (i * numKeys));\n" "}\n" "__kernel void intintLCGQuadraticOpenCompactCLHash_RangeInsertSingle(__global\n" " char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintLCGQuadraticOpenCompactCLHash_InnerInsertSingle(tableData,\n" " keys[i],\n" " values[i]);\n" "}\n" "__kernel void intintLCGQuadraticOpenCompactCLHash_RangeInsert(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " unsigned int\n" " numEntries,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintLCGQuadraticOpenCompactCLHash_InnerInsert(tableData, numEntries,\n" " keys + (i * numEntries),\n" " values +\n" " (i * numEntries));\n" "}\n" "__kernel void\n" "intintLCGQuadraticOpenCompactCLHash_RangeInsertSingleNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " __global int\n" " *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " intintLCGQuadraticOpenCompactCLHash_InnerInsertSingleNoOverwrite\n" " (tableData, keys[i], values[i]);\n" "}\n" "__kernel void\n" "intintLCGQuadraticOpenCompactCLHash_RangeInsertNoOverwrite(__global char\n" " *tableData,\n" " unsigned int\n" " numInsertions,\n" " unsigned int\n" " numEntries,\n" " __global int *keys,\n" " __global int\n" " *values) {\n" " uint i = get_global_id(0);\n" " if (i >= numInsertions) {\n" " return;\n" " }\n" " 
intintLCGQuadraticOpenCompactCLHash_InnerInsertNoOverwrite(tableData,\n" " numEntries,\n" " keys +\n" " (i *\n" " numEntries),\n" " values +\n" " (i *\n" " numEntries));\n" "}\n" "__kernel void intintHash_RangeQuery(__global char *tableData,\n" " unsigned int numQueries,\n" " unsigned int numKeys, __global int *keys,\n" " __global int *valuesOutput) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_RangeQuery(tableData,\n" " numQueries,\n" " numKeys, keys,\n" " valuesOutput);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return intintIdentitySentinelPerfectCLHash_RangeQuery(tableData,\n" " numQueries,\n" " numKeys,\n" " keys,\n" " valuesOutput);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return intintLCGLinearOpenCompactCLHash_RangeQuery(tableData,\n" " numQueries,\n" " numKeys,\n" " keys,\n" " valuesOutput);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return intintLCGQuadraticOpenCompactCLHash_RangeQuery(tableData,\n" " numQueries,\n" " numKeys,\n" " keys,\n" " valuesOutput);\n" " }\n" "}\n" "__kernel void intintHash_RangeQuerySingle(__global char *tableData,\n" " unsigned int numQueries,\n" " __global int *keys,\n" " __global int *valueOutput) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_RangeQuerySingle(tableData,\n" " numQueries,\n" " keys,\n" " valueOutput);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_RangeQuerySingle\n" " (tableData, numQueries, keys, valueOutput);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGLinearOpenCompactCLHash_RangeQuerySingle(tableData,\n" " numQueries,\n" " keys,\n" " valueOutput);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_RangeQuerySingle\n" " (tableData, numQueries, keys, valueOutput);\n" " }\n" "}\n" 
"__kernel void intintHash_RangeInsert(__global char *tableData,\n" " unsigned int numInsertions,\n" " unsigned int numEntries,\n" " __global int *keys, __global int *values) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_RangeInsert(tableData,\n" " numInsertions,\n" " numEntries, keys,\n" " values);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_RangeInsert(tableData,\n" " numInsertions,\n" " numEntries,\n" " keys,\n" " values);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return intintLCGLinearOpenCompactCLHash_RangeInsert(tableData,\n" " numInsertions,\n" " numEntries,\n" " keys,\n" " values);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_RangeInsert(tableData,\n" " numInsertions,\n" " numEntries,\n" " keys,\n" " values);\n" " }\n" "}\n" "__kernel void intintHash_RangeInsertSingle(__global char *tableData,\n" " unsigned int numInsertions,\n" " __global int *keys,\n" " __global int *values) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_RangeInsertSingle(tableData,\n" " numInsertions,\n" " keys,\n" " values);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_RangeInsertSingle\n" " (tableData, numInsertions, keys, values);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGLinearOpenCompactCLHash_RangeInsertSingle\n" " (tableData, numInsertions, keys, values);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_RangeInsertSingle\n" " (tableData, numInsertions, keys, values);\n" " }\n" "}\n" "__kernel void intintHash_RangeInsertNoOverwrite(__global char *tableData,\n" " unsigned int numInsertions,\n" " unsigned int numEntries,\n" " __global int *keys,\n" " __global int 
*values) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentityPerfectCLHash_RangeInsertNoOverwrite\n" " (tableData, numInsertions, numEntries, keys, values);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_RangeInsertNoOverwrite\n" " (tableData, numInsertions, numEntries, keys, values);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGLinearOpenCompactCLHash_RangeInsertNoOverwrite\n" " (tableData, numInsertions, numEntries, keys, values);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_RangeInsertNoOverwrite\n" " (tableData, numInsertions, numEntries, keys, values);\n" " }\n" "}\n" "__kernel void intintHash_RangeInsertSingleNoOverwrite(__global char *tableData,\n" " unsigned int\n" " numInsertions,\n" " __global int *keys,\n" " __global int *values) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentityPerfectCLHash_RangeInsertSingleNoOverwrite\n" " (tableData, numInsertions, keys, values);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_RangeInsertSingleNoOverwrite\n" " (tableData, numInsertions, keys, values);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGLinearOpenCompactCLHash_RangeInsertSingleNoOverwrite\n" " (tableData, numInsertions, keys, values);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_RangeInsertSingleNoOverwrite\n" " (tableData, numInsertions, keys, values);\n" " }\n" "}\n" "int intintHash_Query(__global char *tableData, unsigned int numKeys,\n" " __global int *keys, __global int *valuesOutput) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_Query(tableData, numKeys,\n" " keys, 
valuesOutput);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return intintIdentitySentinelPerfectCLHash_Query(tableData,\n" " numKeys, keys,\n" " valuesOutput);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return intintLCGLinearOpenCompactCLHash_Query(tableData,\n" " numKeys, keys,\n" " valuesOutput);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return intintLCGQuadraticOpenCompactCLHash_Query(tableData,\n" " numKeys, keys,\n" " valuesOutput);\n" " }\n" " return HASH_EXIT_CODE_ERROR;\n" "}\n" "int intintHash_QuerySingle(__global char *tableData, int key,\n" " __global int *valueOutput) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_QuerySingle(tableData, key,\n" " valueOutput);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_QuerySingle(tableData,\n" " key,\n" " valueOutput);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return intintLCGLinearOpenCompactCLHash_QuerySingle(tableData,\n" " key,\n" " valueOutput);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_QuerySingle(tableData,\n" " key,\n" " valueOutput);\n" " }\n" " return HASH_EXIT_CODE_ERROR;\n" "}\n" "int intintHash_Insert(__global char *tableData, unsigned int numEntries,\n" " __global int *keys, __global int *values) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_Insert(tableData, numEntries,\n" " keys, values);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return intintIdentitySentinelPerfectCLHash_Insert(tableData,\n" " numEntries,\n" " keys, values);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return intintLCGLinearOpenCompactCLHash_Insert(tableData,\n" " numEntries, keys,\n" " values);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return 
intintLCGQuadraticOpenCompactCLHash_Insert(tableData,\n" " numEntries,\n" " keys, values);\n" " }\n" " return HASH_EXIT_CODE_ERROR;\n" "}\n" "int intintHash_InsertSingle(__global char *tableData, int key, int value) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_InsertSingle(tableData, key,\n" " value);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_InsertSingle(tableData,\n" " key,\n" " value);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return intintLCGLinearOpenCompactCLHash_InsertSingle(tableData,\n" " key,\n" " value);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_InsertSingle(tableData,\n" " key,\n" " value);\n" " }\n" " return HASH_EXIT_CODE_ERROR;\n" "}\n" "int intintHash_InsertNoOverwrite(__global char *tableData,\n" " unsigned int numEntries, __global int *keys,\n" " __global int *values) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return intintIdentityPerfectCLHash_InsertNoOverwrite(tableData,\n" " numEntries,\n" " keys,\n" " values);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_InsertNoOverwrite\n" " (tableData, numEntries, keys, values);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGLinearOpenCompactCLHash_InsertNoOverwrite\n" " (tableData, numEntries, keys, values);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_InsertNoOverwrite\n" " (tableData, numEntries, keys, values);\n" " }\n" " return HASH_EXIT_CODE_ERROR;\n" "}\n" "int intintHash_InsertSingleNoOverwrite(__global char *tableData, int key,\n" " int value) {\n" " switch (((__global int *)tableData)[0]) {\n" " case IDENTITY_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentityPerfectCLHash_InsertSingleNoOverwrite\n" " 
(tableData, key, value);\n" " case IDENTITY_SENTINEL_PERFECT_CL_HASH_ID:\n" " return\n" " intintIdentitySentinelPerfectCLHash_InsertSingleNoOverwrite\n" " (tableData, key, value);\n" " case LCG_LINEAR_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGLinearOpenCompactCLHash_InsertSingleNoOverwrite\n" " (tableData, key, value);\n" " case LCG_QUADRATIC_OPEN_COMPACT_CL_HASH_ID:\n" " return\n" " intintLCGQuadraticOpenCompactCLHash_InsertSingleNoOverwrite\n" " (tableData, key, value);\n" " }\n" " return HASH_EXIT_CODE_ERROR;\n" "}\n" ;
QuickSort.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Below this size, spawning OpenMP tasks costs more than sorting serially. */
#define TASK_CUTOFF 1000

/* Swap elements i and j of data. */
void swap(int *data, int i, int j)
{
    int tmp = data[i];
    data[i] = data[j];
    data[j] = tmp;
}

/*
 * Hoare partition of data[st..en] around pivot data[st].
 * Returns an index p with data[st..p] <= pivot <= data[p+1..en].
 * The do/while form advances i and j past swapped elements, so the loop
 * terminates even when the array contains keys equal to the pivot
 * (the original for(;data[i]<pivot;i++) variant spun forever on duplicates).
 */
int partition(int *data, int st, int en)
{
    int pivot = data[st];
    int i = st - 1;
    int j = en + 1;

    while (1) {
        do { i++; } while (data[i] < pivot);
        do { j--; } while (data[j] > pivot);
        if (i >= j)
            return j;
        swap(data, i, j);
    }
}

/*
 * Recursive worker. Must be entered by exactly ONE thread inside a parallel
 * region; parallelism comes from the explicit tasks below, not from
 * replicating the whole recursion in every thread (which races).
 */
static void QuickSortWorker(int *data, int st, int en)
{
    if (st >= en)
        return;

    int p = partition(data, st, en);

    if (en - st > TASK_CUTOFF) {
        /* The two halves are disjoint, so they may run concurrently. */
        #pragma omp task default(none) firstprivate(data, st, p)
        QuickSortWorker(data, st, p);
        #pragma omp task default(none) firstprivate(data, en, p)
        QuickSortWorker(data, p + 1, en);
        #pragma omp taskwait
    } else {
        QuickSortWorker(data, st, p);
        QuickSortWorker(data, p + 1, en);
    }
}

/* Sort data[st..en] in ascending order using task-parallel quicksort. */
void QuickSort(int *data, int st, int en)
{
    #pragma omp parallel
    #pragma omp single nowait
    QuickSortWorker(data, st, en);
}

int main(void)
{
    int cnt = 5, i;
    int *data;

    printf("\nEnter Data count : ");
    if (scanf("%d", &cnt) != 1 || cnt <= 0)
        return 1;

    /* C has no 'new'; the original also assigned into an array object. */
    data = malloc((size_t)cnt * sizeof *data);
    if (data == NULL)
        return 1;

    printf("\nEnter %d data elements :\n", cnt);
    for (i = 0; i < cnt; i++) {
        if (scanf("%d", &data[i]) != 1) {
            free(data);
            return 1;
        }
    }

    QuickSort(data, 0, cnt - 1);

    printf("\nData elements : \n");
    for (i = 0; i < cnt; i++)
        printf("%d\t", data[i]);

    free(data);
    return 0;
}
o5logon_fmt_plug.c
/* Cracker for Oracle's O5LOGON protocol hashes. Hacked together during
 * September of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * O5LOGON is used since version 11g. CVE-2012-3137 applies to Oracle 11.1
 * and 11.2 databases. Oracle has "fixed" the problem in version 11.2.0.3.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

/*
 * Modifications (c) 2014 Harrison Neal, released under the same terms
 * as the original.
 */

/* Standard John-the-Ripper plugin "stanza": the same file is included three
 * times by the core, selecting externs, registration, or the implementation. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_o5logon;
#elif FMT_REGISTERS_H
john_register_one(&fmt_o5logon);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes.h"

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               512 // tuned on core i7
#endif
//#define OMP_SCALE             8192 // tuned on K8-Dual HT
#endif

#include "memdbg.h"

#define FORMAT_LABEL            "o5logon"
#define FORMAT_NAME             "Oracle O5LOGON protocol"
#define ALGORITHM_NAME          "SHA1 AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        32
#define CIPHERTEXT_LENGTH       48  /* bytes of AUTH_SESSKEY ciphertext */
#define SALT_LENGTH             10  /* bytes of AUTH_VFR_DATA */
#define BINARY_SIZE             0
#define BINARY_ALIGN            1
#define SALT_ALIGN              1
#define SALT_SIZE               sizeof(struct custom_salt)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

/* Self-test/benchmark vectors: "$o5logon$<hex ciphertext>*<hex salt>". */
static struct fmt_tests o5logon_tests[] = {
	{"$o5logon$566499330E8896301A1D2711EFB59E756D41AF7A550488D82FE7C8A418E5BE08B4052C0DC404A805C1D7D43FE3350873*4F739806EBC1D7742BC6", "password"},
	{"$o5logon$3BB71A77E1DBB5FFCCC8FC8C4537F16584CB5113E4CCE3BAFF7B66D527E32D29DF5A69FA747C4E2C18C1837F750E5BA6*4F739806EBC1D7742BC6", "password"},
	{"$o5logon$ED91B97A04000F326F17430A65DACB30CD1EF788E6EC310742B811E32112C0C9CC39554C9C01A090CB95E95C94140C28*7FD52BC80AA5836695D4", "test1"},
	{"$o5logon$B7711CC7E805520CEAE8C1AC459F745639E6C9338F192F92204A9518B226ED39851C154CB384E4A58C444A6DF26146E4*3D14D54520BC9E6511F4", "openwall"},
	{"$o5logon$76F9BBAEEA9CF70F2A660A909F85F374F16F0A4B1BE1126A062AE9F0D3268821EF361BF08EBEF392F782F2D6D0192FD6*3D14D54520BC9E6511F4", "openwall"},
	{"$o5logon$C35A36EA7FF7293EF828B2BD5A2830CA28A57BF621EAE14B605D41A88FC2CF7EFE7C73495FB22F06D6D98317D63DDA71*406813CBAEED2FD4AD23", "MDDATA"},
	{"$o5logon$B9AC30E3CD7E1D7C95FA17E1C62D061289C36FD5A6C45C098FF7572AB9AD2B684FB7E131E03CE1543A5A99A30D68DD13*447BED5BE70F7067D646", "sys"},
	// the following hash (from HITCON 2014 CTF) revealed multiple bugs in this format (false positives)!
	// m3odbe
	// m3o3rt
	{"$o5logon$A10D52C1A432B61834F4B0D9592F55BD0DA2B440AEEE1858515A646683240D24A61F0C9366C63E93D629292B7891F44A*878C0B92D61A594F2680", "m3ow00"},
	{"$o5logon$52696131746C356643796B6D716F46474444787745543263764B725A6D756A69E46DE32AFBB33E385C6D9C7031F4F2B9*3131316D557239736A65", "123456"},
	{"$o5logon$4336396C304B684638634450576B30397867704F54766D71494F676F5A5A386F09F4A10B5908B3ED5B1D6878A6C78751*573167557661774E7271", ""},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];  /* candidate passwords */
static int *cracked, any_cracked;                /* per-index hit flags */

static struct custom_salt {
	char unsigned salt[SALT_LENGTH];  /* AUTH_VFR_DATA */
	char unsigned ct[CIPHERTEXT_LENGTH];  /* AUTH_SESSKEY */
} *cur_salt;

static aes_fptr_cbc aesFunc;  /* AES-192-CBC decrypt, best available impl */

/* Allocate per-candidate buffers and pick the AES implementation.
 * With OpenMP, keys-per-crypt is scaled by thread count * OMP_SCALE. */
static void init(struct fmt_main *self)
{
	static char Buf[128];
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	cracked = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cracked));
	aesFunc = get_AES_dec192_CBC();
	/* Append the runtime-selected AES flavor to the reported name. */
	sprintf(Buf, "%s %s", self->params.algorithm_name,
	        get_AES_type_string());
	self->params.algorithm_name = Buf;
}

static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

/* Syntactic check of one ciphertext line; 1 = well-formed, 0 = reject. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;

	if (strncmp(ciphertext, "$o5logon$", 9))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 9;
	p = strtokm(ctcopy, "*");  /* ciphertext */
	if (!p)
		goto err;
	if (hexlenu(p) != CIPHERTEXT_LENGTH * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)  /* salt */
		goto err;
	if (hexlenu(p) != SALT_LENGTH * 2)
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Decode the hex ciphertext and salt into a custom_salt.
 * NOTE(review): returns a pointer to function-local static storage, as is
 * conventional for JtR get_salt(); the core copies it immediately. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	ctcopy += 9;  /* skip over "$o5logon$" */
	p = strtokm(ctcopy, "*");
	for (i = 0; i < CIPHERTEXT_LENGTH; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	for (i = 0; i < SALT_LENGTH; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* For each candidate: key = SHA1(password || salt) zero-padded to 24 bytes,
 * then AES-192-CBC-decrypt the last ciphertext block (IV = previous block)
 * and accept if the plaintext ends in eight 0x08 padding bytes. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	if (any_cracked) {
		memset(cracked, 0, sizeof(*cracked) * count);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char key[24];
		unsigned char pt[16];
		unsigned char iv[16];
		// No longer using AES key here.
		SHA_CTX ctx;

		/* SHA-1 fills key[0..19]; zero the remaining 4 of 24. */
		memset(&key[20], 0, 4);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA1_Update(&ctx, cur_salt->salt, 10);
		SHA1_Final(key, &ctx);
		/* IV for the final block is the preceding ciphertext block. */
		memcpy(iv, cur_salt->ct + 16, 16);

		// Using AES function:
		// in (cipher), out (plain), key, block count, iv
		aesFunc(cur_salt->ct + 32, pt, key, 1, iv);

		if (!memcmp(pt + 8, "\x08\x08\x08\x08\x08\x08\x08\x08", 8)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store one candidate password, truncated to PLAINTEXT_LENGTH. */
static void o5logon_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* JtR format descriptor / callback table. */
struct fmt_main fmt_o5logon = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		o5logon_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		o5logon_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
main.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>

/*
 * 2-D Burgers' equation on a square domain, solved with an explicit
 * finite-difference scheme, once with OpenMP and once serially, and the
 * two wall-clock times compared.
 *
 * Fixes relative to the original:
 *  - The copy-back loop (u = u_new) previously followed three `nowait`
 *    work-sharing loops with no intervening barrier, so a thread could
 *    copy u_new while another thread was still writing it (data race).
 *    The barrier of the last boundary loop now orders writes before reads.
 *  - Boundary indices mixed up x_points/y_points (row index must be
 *    bounded by y_points, column index by x_points); benign only because
 *    the grid happens to be square.
 *  - The serial baseline omitted the convection terms that the parallel
 *    version computes, making the printed speedup meaningless; both now
 *    evaluate the same equations.
 */
int main()
{
    /* Domain definition. */
    double x_len = 2.0;
    double y_len = 2.0;
    int x_points = 251;
    int y_points = 251;
    double del_x = x_len / (x_points - 1);
    double del_y = y_len / (y_points - 1);
    double x[x_points], y[y_points];

    /* Grid coordinates; the two loops are independent, hence nowait. */
    #pragma omp parallel
    {
        #pragma omp for nowait
        for (int i = 0; i < x_points; i++) {
            x[i] = i * del_x;
        }
        #pragma omp for
        for (int j = 0; j < y_points; j++) {
            y[j] = j * del_y;
        }
    }

    /* Scheme parameters. */
    int num_itrs = 120;               /* number of time iterations */
    double nu = 0.01;                 /* viscosity */
    double sigma = 0.0009;
    double del_t = sigma * del_x * del_y / nu;  /* CFL criteria */

    double u[y_points][x_points], u_new[y_points][x_points];
    double v[y_points][x_points], v_new[y_points][x_points];

    /* Initial condition: uniform flow with a square "hat" of doubled
     * velocity over 0.5 < x,y < 1.0. */
    #pragma omp parallel for
    for (int i = 0; i < y_points; i++) {
        for (int j = 0; j < x_points; j++) {
            u[i][j] = 1.0;
            v[i][j] = 1.0;
            u_new[i][j] = 1.0;
            v_new[i][j] = 1.0;
            if (x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0) {
                u[i][j] = 2.0;
                v[i][j] = 2.0;
                u_new[i][j] = 2.0;
                v_new[i][j] = 2.0;
            }
        }
    }

    /* Time iteration (parallel). One parallel region; every thread runs
     * the itr loop and shares each inner loop's iterations. */
    double par_start_time = omp_get_wtime();
    #pragma omp parallel
    for (int itr = 0; itr < num_itrs; itr++) {
        /* Interior update: diffusion + convection.  The boundary loops
         * below write disjoint cells of u_new, so nowait is safe here. */
        #pragma omp for nowait
        for (int i = 1; i < y_points - 1; i++) {
            for (int j = 1; j < x_points - 1; j++) {
                u_new[i][j] = u[i][j]
                    + (nu * del_t / (del_x * del_x)) * (u[i][j+1] + u[i][j-1] - 2 * u[i][j])
                    + (nu * del_t / (del_y * del_y)) * (u[i+1][j] + u[i-1][j] - 2 * u[i][j])
                    - (del_t / del_x) * u[i][j] * (u[i][j] - u[i][j-1])
                    - (del_t / del_y) * v[i][j] * (u[i][j] - u[i-1][j]);
                v_new[i][j] = v[i][j]
                    + (nu * del_t / (del_x * del_x)) * (v[i][j+1] + v[i][j-1] - 2 * v[i][j])
                    + (nu * del_t / (del_y * del_y)) * (v[i+1][j] + v[i-1][j] - 2 * v[i][j])
                    - (del_t / del_x) * u[i][j] * (v[i][j] - v[i][j-1])
                    - (del_t / del_y) * v[i][j] * (v[i][j] - v[i-1][j]);
            }
        }

        /* Boundary conditions: first and last rows. */
        #pragma omp for nowait
        for (int i = 0; i < x_points; i++) {
            u_new[0][i] = 1.0;
            v_new[0][i] = 1.0;
            u_new[y_points-1][i] = 1.0;
            v_new[y_points-1][i] = 1.0;
        }

        /* Boundary conditions: first and last columns.  NO nowait here —
         * the implicit barrier guarantees every write to u_new/v_new has
         * completed before any thread starts the copy-back below. */
        #pragma omp for
        for (int j = 0; j < y_points; j++) {
            u_new[j][0] = 1.0;
            v_new[j][0] = 1.0;
            u_new[j][x_points-1] = 1.0;
            v_new[j][x_points-1] = 1.0;
        }

        /* Copy new values into the old arrays.  The implicit barrier at
         * the end of this loop orders the copy before the next itr. */
        #pragma omp for
        for (int i = 0; i < y_points; i++) {
            for (int j = 0; j < x_points; j++) {
                u[i][j] = u_new[i][j];
                v[i][j] = v_new[i][j];
            }
        }
    }
    double par_end_time = omp_get_wtime();

    printf("\n Time taken for parallel computing is: %f",
           par_end_time - par_start_time);

    /* Serial computing - to compare time.  Reset the fields first. */
    for (int i = 0; i < y_points; i++) {
        for (int j = 0; j < x_points; j++) {
            u[i][j] = 1.0;
            v[i][j] = 1.0;
            u_new[i][j] = 1.0;
            v_new[i][j] = 1.0;
            if (x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0) {
                u[i][j] = 2.0;
                v[i][j] = 2.0;
                u_new[i][j] = 2.0;
                v_new[i][j] = 2.0;
            }
        }
    }

    /* Time iteration (serial) — same equations as the parallel loop so
     * the speedup number compares like with like. */
    double ser_start_time = omp_get_wtime();
    for (int itr = 0; itr < num_itrs; itr++) {
        for (int i = 1; i < y_points - 1; i++) {
            for (int j = 1; j < x_points - 1; j++) {
                u_new[i][j] = u[i][j]
                    + (nu * del_t / (del_x * del_x)) * (u[i][j+1] + u[i][j-1] - 2 * u[i][j])
                    + (nu * del_t / (del_y * del_y)) * (u[i+1][j] + u[i-1][j] - 2 * u[i][j])
                    - (del_t / del_x) * u[i][j] * (u[i][j] - u[i][j-1])
                    - (del_t / del_y) * v[i][j] * (u[i][j] - u[i-1][j]);
                v_new[i][j] = v[i][j]
                    + (nu * del_t / (del_x * del_x)) * (v[i][j+1] + v[i][j-1] - 2 * v[i][j])
                    + (nu * del_t / (del_y * del_y)) * (v[i+1][j] + v[i-1][j] - 2 * v[i][j])
                    - (del_t / del_x) * u[i][j] * (v[i][j] - v[i][j-1])
                    - (del_t / del_y) * v[i][j] * (v[i][j] - v[i-1][j]);
            }
        }
        /* Boundary conditions. */
        for (int i = 0; i < x_points; i++) {
            u_new[0][i] = 1.0;
            v_new[0][i] = 1.0;
            u_new[y_points-1][i] = 1.0;
            v_new[y_points-1][i] = 1.0;
        }
        for (int j = 0; j < y_points; j++) {
            u_new[j][0] = 1.0;
            v_new[j][0] = 1.0;
            u_new[j][x_points-1] = 1.0;
            v_new[j][x_points-1] = 1.0;
        }
        /* Copy new values into the old arrays. */
        for (int i = 0; i < y_points; i++) {
            for (int j = 0; j < x_points; j++) {
                u[i][j] = u_new[i][j];
                v[i][j] = v_new[i][j];
            }
        }
    }
    double ser_end_time = omp_get_wtime();

    printf("\n Time taken for serial computing is: %f",
           ser_end_time - ser_start_time);
    printf("\n Speedup is \t : %f",
           (ser_end_time - ser_start_time) / (par_end_time - par_start_time));
    return 0;
}
GB_binop__eq_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__eq_fp64
// A.*B function (eWiseMult):       GB_AemultB__eq_fp64
// A*D function (colscale):         GB_AxD__eq_fp64
// D*A function (rowscale):         GB_DxB__eq_fp64
// C+=B function (dense accum):     GB_Cdense_accumB__eq_fp64
// C+=b function (dense accum):     GB_Cdense_accumb__eq_fp64
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__eq_fp64
// C=scalar+B                       GB_bind1st__eq_fp64
// C=scalar+B'                      GB_bind1st_tran__eq_fp64
// C=A+scalar                       GB_bind2nd__eq_fp64
// C=A'+scalar                      GB_bind2nd_tran__eq_fp64

// C type:   bool
// A type:   double
// B,b type: double

// BinaryOp: cij = (aij == bij)

// These macros are consumed by the generic templates #included below; each
// template is specialized to EQ_FP64 purely through these definitions.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x == y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_FP64 || GxB_NO_EQ_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// EQ does not qualify, so no dense ewise3-accum kernel is generated here.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__eq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__eq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The #if 0 means EQ cannot be used as an accumulator here; the stub
    // still returns GrB_SUCCESS as a no-op.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__eq_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Disabled for EQ: scalar accumulation stub is a no-op.
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__eq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__eq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__eq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__eq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__eq_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__eq_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)

#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (x == aij) ; \
}

GrB_Info GB_bind1st_tran__eq_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for anything below this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)

#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (aij == y) ; \
}

GrB_Info GB_bind2nd_tran__eq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
RelaxOp2DOMP.h
#ifndef _RelaxOp2D_OMP_h
#define _RelaxOp2D_OMP_h
#include "TriSolver.h"
#include "GridFun2D.h"

// One ADI-style relaxation step for a 2D diffusion problem: builds the
// forward (explicit) and backward (implicit) tridiagonal operators for the
// x and y directions in initialize(), and applies one time step in apply().
// NOTE(review): the code explicitly assumes a square grid (xPanel == yPanel);
// several boundary-handling lines below mix xPanel/yPanel and only agree
// under that assumption.
class RelaxOp2D{
public:
    double dt;                          // time step
    TriSolver triSolX, triSolY;         // implicit (backsolve) operators
    TriOperator triOpX, triOpY;         // explicit (forward) operators
    GridFun2D uStar, Fstar, uTemp;      // work grids; Fstar holds dt*f
    std::vector<double> uX_Zero, uY_Zero;   // zero slices for boundary rows/cols
    void initialize(double, double, double, const GridFun2D&);
    void apply(GridFun2D&, GridFun2D&);
};

// Build the four tridiagonal operators for time step `timestep` and
// diffusion coefficients alphaX / alphaY, using f only for its grid metadata.
void RelaxOp2D::initialize(double timestep, double alphaX, double alphaY, const GridFun2D& f){
    dt = timestep;
    uStar = f;
    uTemp = f; // Instantiation of temporary variables
    Fstar = f;
    Fstar *= dt; // Initialize Fstar*dt
    long M = f.xPanel; // Define system X size from the problem
    uX_Zero.resize(M+1); // Square matrix assumed
    uY_Zero.resize(M+1);
    // resize() already value-initializes to 0.0; these loops (starting at 1)
    // are redundant but harmless — element 0 relies on the value-init.
    for (long i = 1; i < M+1; i++) {
        uX_Zero[i] = 0.0;
    }
    for (long i = 1; i < M+1; i++) {
        uY_Zero[i] = 0.0;
    }
    std::vector<double> loDiag(M);
    std::vector<double> upDiag(M);
    std::vector<double> diag(M+1);
    double hx = f.hx; // Define grid size for x
    double a = alphaX*dt/(2*hx*hx), b = -2.0 * a;   // Crank–Nicolson-style half weight
    // Interior Points
    for(long i = 0; i < M; i++){
        loDiag[i] = a;
        upDiag[i] = a;
        diag[i] = b;
    }
    diag[0] = 0.0;
    diag[M] = 0.0; // B.C.s: boundary rows left untouched by the forward operator
    upDiag[0] = 0.0;
    loDiag[M-1] = 0.0;
    triOpX = TriOperator(M+1, loDiag, diag, upDiag); // Forward Solver in the x direction
    for(long i = 0; i < M ; i++){ // Change the elements for backsolver: (I - A)
        loDiag[i] = -a;
        upDiag[i] = -a;
        diag[i] = 1 - b;
    }
    diag[0] = 1.0;
    diag[M] = 1.0; // B.C.s: identity on boundary rows
    upDiag[0] = 0.0;
    loDiag[M-1] = 0.0;
    triSolX = TriSolver(M+1, loDiag, diag, upDiag); // Backsolver in the x direction
    long N = f.yPanel; //Define system Y size from the problem
    loDiag.resize(N);
    upDiag.resize(N);
    diag.resize(N+1);
    double hy = f.hy; // Define grid size for y
    // NOTE(review): the forward-Y weight lacks the /2 factor used for X;
    // apply() later multiplies uTemp by 0.5, which appears to compensate —
    // confirm against the intended scheme before changing either place.
    a = alphaY*dt/(hy*hy);
    b = -2.0 * a;
    // Interior Points
    for(long i = 0; i < N; i++){
        loDiag[i] = a;
        upDiag[i] = a;
        diag[i] = b;
    }
    diag[0] = 0.0;
    diag[N] = 0.0; // B.C.s
    upDiag[0] = 0.0;
    loDiag[N-1] = 0.0;
    triOpY = TriOperator(N+1, loDiag, diag, upDiag); // Forward Solver in the y direction
    a = alphaY*dt/(2*hy*hy);    // implicit Y operator uses the half weight
    b = -2.0 * a;
    for(long i = 0; i < N; i++){
        loDiag[i] = - a;
        upDiag[i] = - a;
        diag[i] = 1 - b;
    }
    diag[0] = 1.0;
    diag[N] = 1.0; // B.C.s
    upDiag[0] = 0.0;
    loDiag[N-1] = 0.0;
    triSolY = TriSolver(N+1, loDiag, diag, upDiag); // Backsolver in the y direction
}

// Advance uIn by one relaxation step into uOut:
//   forward X and Y sweeps (explicit), combine with -dt*f, then implicit
//   backsolves in X then Y.  Boundary slices are copied through unchanged.
void RelaxOp2D::apply(GridFun2D& uIn, GridFun2D& uOut){
    std::vector<double> uXTemp, uYTemp;
    std::vector<double> uXTempNew, uYTempNew;
    uXTemp.resize(uStar.values.getIndex2Size());
    uXTempNew.resize(uStar.values.getIndex2Size());
    uYTemp.resize(uStar.values.getIndex1Size());
    uYTempNew.resize(uStar.values.getIndex1Size());
    long i;
    uOut = uIn; // Apply identity operator
    // each thread gets its own scratch slices via firstprivate
#pragma omp parallel for private(i) firstprivate(uXTemp,uXTempNew) schedule(static) default(shared)
    for (i = 1; i < uIn.yPanel; i++) { // Apply Forward X operator
        uIn.extractXslice(i, uXTemp);
        triOpX.apply(uXTemp, uXTempNew);
        uStar.insertXslice(i, uXTempNew);
    }
    uStar.insertXslice(0, uX_Zero); //Don't touch the boundary
    uStar.insertXslice(uIn.yPanel, uX_Zero);
#pragma omp parallel for private(i) firstprivate(uYTemp,uYTempNew) schedule(static) default(shared)
    for (i = 1; i < uIn.xPanel; i++) { // Apply Forward Y operator
        uIn.extractYslice(i, uYTemp);
        triOpY.apply(uYTemp, uYTempNew);
        uTemp.insertYslice(i, uYTempNew);
    }
    // NOTE(review): the Y sweep above writes Y-slices, but the boundary is
    // zeroed here with insertXslice — rows instead of columns.  Presumably
    // this should be insertYslice; it only coincides on a square grid with
    // zero boundaries.  TODO confirm against the discretization.
    uTemp.insertXslice(0, uY_Zero); //Don't touch the boundary
    uTemp.insertXslice(uIn.xPanel, uY_Zero);
    uOut += uStar; // Complete arithmetic in the x direction
    uOut += uTemp;
    uOut -= Fstar;
#pragma omp parallel for private(i) firstprivate(uXTemp,uXTempNew) schedule(static) default(shared)
    for (i = 1; i < uIn.yPanel; i++) { // Apply Backsolver in the x direction
        uOut.extractXslice(i, uXTemp);
        triSolX.apply(uXTemp, uXTempNew);
        uStar.insertXslice(i, uXTempNew);
    }
    uOut.extractXslice(0, uXTemp); //Don't touch the boundary
    uStar.insertXslice(0, uXTemp);
    // NOTE(review): the X-slice index runs over the y range (loop bound is
    // yPanel above), yet xPanel is used here — equivalent only when
    // xPanel == yPanel, as assumed.
    uOut.extractXslice(uIn.xPanel, uXTemp);
    uStar.insertXslice(uIn.xPanel, uXTemp);
    uTemp *= 0.5; // Divide by 2 (compensates the missing /2 in triOpY's weight)
    uStar -= uTemp; // Complete arithmetics in the y direction
#pragma omp parallel for private(i) firstprivate(uYTemp,uYTempNew) schedule(static) default(shared)
    for (i = 1; i < uIn.xPanel; i++) { // Apply Backsolver in the y direction
        uStar.extractYslice(i, uYTemp);
        triSolY.apply(uYTemp, uYTempNew);
        uOut.insertYslice(i, uYTempNew);
    }
    uStar.extractYslice(0, uYTemp); //Don't touch the boundary
    uOut.insertYslice(0, uYTemp);
    uStar.extractYslice(uIn.xPanel, uYTemp);
    uOut.insertYslice(uIn.xPanel, uYTemp);
}

////################# DEBUG #######################
// std::cout << "######### After Y BackSolve ###########" << std::endl;
// for (long j = 0; j < uIn.yPanel + 1; j++) {
//     for (long i = 0; i < uIn.xPanel + 1; i++) {
//         std::cout << uOut.values(j,i) << "\t";
//     }
//     std::cout << std::endl;
// }
// std::cout << "####################" << std::endl;

#endif
frpca.c
#include "frpca.h"

/*[L, ~] = lu(A) as in MATLAB: L receives the unit-lower-triangular factor
  of A with the partial-pivoting row interchanges undone, so A ~= L*U.
  A and L may be the same matrix (callers pass LUfraction(Q, Q)).*/
void LUfraction(mat *A, mat *L)
{
    matrix_copy(L, A);
    /* 1-based pivot indices produced by LAPACK */
    int *ipiv = (int *)malloc(sizeof(int)*L->nrows);
    /* NOTE(review): the dgetrf return status is not checked; a singular
       input is silently accepted (unchanged from the original). */
    LAPACKE_dgetrf (LAPACK_COL_MAJOR, L->nrows, L->ncols, L->d, L->nrows, ipiv);
    int i,j;
    /* keep only the strictly-lower triangle and put 1s on the diagonal */
    #pragma omp parallel private(i,j)
    {
        #pragma omp for
        for(i=0;i<L->ncols;i++)
        {
            for(j=0;j<i;j++)
            {
                L->d[i*L->nrows+j] = 0;
            }
            L->d[i*L->nrows+i] = 1;
        }
    }
    /* undo the pivoting: apply the recorded row swaps in reverse order
       (sequential by necessity — each swap depends on the previous state) */
    {
        for(i=L->ncols-1;i>=0;i--)
        {
            int ipi = ipiv[i]-1;    /* convert to 0-based */
            for(j=0;j<L->ncols;j++)
            {
                double temp = L->d[j*L->nrows+ipi];
                L->d[j*L->nrows+ipi] = L->d[j*L->nrows+i];
                L->d[j*L->nrows+i] = temp;
            }
        }
    }
    free(ipiv);     /* fix: ipiv was leaked in the original */
}

/*[U, S, V] = eigSVD(A): economy SVD of A computed from the symmetric
  eigendecomposition of A'A.  *V must be ncols x ncols, *S ncols x 1,
  *U nrows x ncols, all preallocated.  dsyevd returns eigenvalues in
  ascending order, so singular values come out ascending as well.*/
void eigSVD(mat* A, mat **U, mat **S, mat **V)
{
    /* V <- A'A; eigendecompose in place: V's columns become eigenvectors,
       S the eigenvalues */
    matrix_transpose_matrix_mult(A, A, *V);
    LAPACKE_dsyevd(LAPACK_COL_MAJOR, 'V', 'U', (*V)->ncols, (*V)->d, (*V)->ncols, (*S)->d);
    mat *V1 = matrix_new((*V)->ncols, (*V)->ncols);
    matrix_copy(V1, (*V));
    int i, j;
    /* singular values are sqrt(eigenvalues); scale V1's columns so that
       A*V1 yields the left singular vectors */
    #pragma omp parallel shared(V1,S) private(i,j)
    {
        #pragma omp for
        for(i=0; i<V1->ncols; i++)
        {
            (*S)->d[i] = sqrt((*S)->d[i]);
            for(j=0; j<V1->nrows;j++)
                V1->d[i*V1->nrows+j] /= (*S)->d[i];
        }
    }
    /* U <- A * V1 (note: several callers pass U aliasing A, preserving the
       original in-place behavior) */
    matrix_matrix_mult(A, V1, (*U));
    matrix_delete(V1);      /* fix: V1 was leaked in the original */
}

/*[U, S, V] = frPCAt(A, k, q): randomized PCA of sparse A, iterating on the
  ncols-side sketch Qt.  k = target rank, q = number of passes over A.
  *U, *S, *V must be preallocated by the caller.*/
void frPCAt(mat_csr *A, mat **U, mat **S, mat **V, int k, int q)
{
    int s = 5;  /* oversampling on top of the target rank */
    mat *Q = matrix_new(A->nrows, k+s);
    mat *Qt = matrix_new(A->ncols, k+s);
    //mat *UU = matrix_new(A->nrows, k+s);
    mat *SS = matrix_new(k+s, 1);
    mat *VV = matrix_new(k+s, k+s);
    /* build the starting sketch; an even q spends one extra pass here */
    if(q%2 == 0)
    {
        initialize_random_matrix(Q);
        csr_matrix_transpose_matrix_mult(A, Q, Qt);
        if(q==2)
        {
            eigSVD(Qt, &Qt, &SS, &VV);
        }
        else
        {
            LUfraction(Qt, Qt);     /* cheap orthogonalization surrogate */
        }
    }
    else
    {
        initialize_random_matrix(Qt);
    }
    /* power iteration: each loop body is one A and one A' pass */
    int niter = (q-1)/2, i;
    for(i=1;i<=niter;i++)
    {
        csr_matrix_matrix_mult(A, Qt, Q);
        csr_matrix_transpose_matrix_mult(A, Q, Qt);
        if(i==niter)
        {
            eigSVD(Qt, &Qt, &SS, &VV);
        }
        else
        {
            LUfraction(Qt, Qt);
        }
    }
    csr_matrix_matrix_mult(A, Qt, Q);
    eigSVD(Q, &Q, &SS, &VV);
    /* singular values are ascending: columns s..k+s-1 hold the top k */
    int inds[k];    /* C99 VLA; k is a small rank parameter */
    for(i=s;i<k+s;i++)
    {
        inds[i-s] = i;
    }
    mat *VV2 = matrix_new(k+s, k);
    matrix_get_selected_columns(Q, inds, *U);
    matrix_get_selected_rows(SS, inds, *S);
    matrix_get_selected_columns(VV, inds, VV2);
    matrix_matrix_mult(Qt, VV2, (*V));
    /* fix: release workspace (all leaked in the original) */
    matrix_delete(Q);
    matrix_delete(Qt);
    matrix_delete(SS);
    matrix_delete(VV);
    matrix_delete(VV2);
}

/*[U, S, V] = frPCA(A, k, q): randomized PCA of sparse A, iterating on the
  nrows-side sketch Q.  k = target rank, q = number of passes over A.
  *U, *S, *V must be preallocated by the caller.*/
void frPCA(mat_csr *A, mat **U, mat **S, mat **V, int k, int q)
{
    int s = 5;  /* oversampling on top of the target rank */
    mat *Q = matrix_new(A->nrows, k+s);
    mat *Qt = matrix_new(A->ncols, k+s);
    mat *UU = matrix_new(A->ncols, k+s);
    mat *SS = matrix_new(k+s, 1);
    mat *VV = matrix_new(k+s, k+s);
    /* build the starting sketch; an even q spends one extra pass here */
    if(q%2 == 0)
    {
        initialize_random_matrix(Qt);
        csr_matrix_matrix_mult(A, Qt, Q);
        if(q==2)
        {
            eigSVD(Q, &Q, &SS, &VV);
        }
        else
        {
            LUfraction(Q, Q);       /* cheap orthogonalization surrogate */
        }
    }
    else
    {
        initialize_random_matrix(Q);
    }
    /* power iteration: each loop body is one A' and one A pass */
    int niter = (q-1)/2, i;
    for(i=1;i<=niter;i++)
    {
        csr_matrix_transpose_matrix_mult(A, Q, Qt);
        csr_matrix_matrix_mult(A, Qt, Q);
        if(i==niter)
        {
            eigSVD(Q, &Q, &SS, &VV);
        }
        else
        {
            LUfraction(Q, Q);
        }
    }
    csr_matrix_transpose_matrix_mult(A, Q, Qt);
    eigSVD(Qt, &UU, &SS, &VV);
    /* singular values are ascending: columns s..k+s-1 hold the top k */
    int inds[k];
    for(i=s;i<k+s;i++)
    {
        inds[i-s] = i;
    }
    mat *VV2 = matrix_new(k+s, k);
    matrix_get_selected_columns(UU, inds, *V);
    matrix_get_selected_rows(SS, inds, *S);
    matrix_get_selected_columns(VV, inds, VV2);
    matrix_matrix_mult(Q, VV2, (*U));
    /* fix: release workspace (all leaked in the original) */
    matrix_delete(Q);
    matrix_delete(Qt);
    matrix_delete(UU);
    matrix_delete(SS);
    matrix_delete(VV);
    matrix_delete(VV2);
}

/* Basic randomized QB factorization of sparse M with k components and p
   power iterations; allocates and returns *U (m x k), *S (k x k, diagonal
   block of SS) and *V (n x k). */
void randQB_basic_csr(mat_csr *M, int k, int p, mat **U, mat **S, mat **V)
{
    int m, n, i, l=k+5;     /* l = k + oversampling of 5 */
    m = M->nrows;
    n = M->ncols;
    mat *Q = matrix_new(m, l);
    mat *B = matrix_new(l, n);
    mat *Vt = matrix_new(l, n);
    mat *VV = matrix_new(n, l);
    mat *UU = matrix_new(l, l);
    mat *UUk = matrix_new(l, k);
    mat *SS = matrix_new(l, l);
    *U = matrix_new(m ,k);
    *V = matrix_new(n, k);
    *S = matrix_new(k, k);
    /* samples: Bt reuses R's buffer and G aliases Q */
    mat *R, *G, *Bt;
    R = matrix_new(n, l);
    G = Q;
    Bt = R;
    initialize_random_matrix(R);
    csr_matrix_matrix_mult(M, R, G);
    QR_factorization_getQ_inplace(G);
    /* power iteration, re-orthogonalizing after each half pass */
    if (p > 0)
    {
        for (i = 0; i < p; i++)
        {
            csr_matrix_transpose_matrix_mult(M, G, R);
            QR_factorization_getQ_inplace(R);
            csr_matrix_matrix_mult(M, R, G);
            QR_factorization_getQ_inplace(G);
        }
    }
    //QR_factorization_getQ_inplace(G);
    /* B = Q' * M, computed as Bt = M' * Q and then transposed */
    csr_matrix_transpose_matrix_mult(M, Q, Bt);
    matrix_build_transpose(B, Bt);
    singular_value_decomposition(B, UU, SS, Vt);
    matrix_build_transpose(VV, Vt);
    /* truncate to the leading k singular triplets */
    int inds[k];
    for(i=0;i<k;i++)
    {
        inds[i] = i;
    }
    matrix_get_selected_columns(UU, inds, UUk);
    matrix_matrix_mult(Q, UUk, (*U));
    matrix_get_selected_columns(VV, inds, (*V));
    matrix_copy_first_k_rows_and_columns(*S, SS);
    /* fix: release all workspace, not just R (Bt aliases R, G aliases Q —
       each buffer is freed exactly once) */
    matrix_delete(R);
    matrix_delete(Q);
    matrix_delete(B);
    matrix_delete(Vt);
    matrix_delete(VV);
    matrix_delete(UU);
    matrix_delete(UUk);
    matrix_delete(SS);
}
convolution_1x1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// Repack the 1x1 convolution weights for the SGEMM kernel: output channels
// are grouped in blocks of 8 (aarch64 only), then 4, then 1, and within each
// block the weights of the grouped channels are interleaved per input channel
// so the compute kernel can load them contiguously.
static void conv1x1s1_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const float* kernel = _kernel;

    // interleave
#if __ARM_NEON && __aarch64__
    kernel_tm.create(4*8, inch/4 + inch%4, outch/8 + (outch%8)/4 + outch%4);
#else
    kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4);
#endif // __ARM_NEON && __aarch64__

    int p = 0;
#if __ARM_NEON && __aarch64__
    // blocks of 8 output channels (aarch64 has enough registers for this)
    for (; p+7<outch; p+=8)
    {
        const float* kernel0 = kernel + (p+0)*inch;
        const float* kernel1 = kernel + (p+1)*inch;
        const float* kernel2 = kernel + (p+2)*inch;
        const float* kernel3 = kernel + (p+3)*inch;
        const float* kernel4 = kernel + (p+4)*inch;
        const float* kernel5 = kernel + (p+5)*inch;
        const float* kernel6 = kernel + (p+6)*inch;
        const float* kernel7 = kernel + (p+7)*inch;

        float* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            // kernel0...7 0
            ktmp[0] = kernel0[0];
            ktmp[1] = kernel1[0];
            ktmp[2] = kernel2[0];
            ktmp[3] = kernel3[0];
            ktmp[4] = kernel4[0];
            ktmp[5] = kernel5[0];
            ktmp[6] = kernel6[0];
            ktmp[7] = kernel7[0];
            ktmp += 8;

            kernel0 += 1;
            kernel1 += 1;
            kernel2 += 1;
            kernel3 += 1;
            kernel4 += 1;
            kernel5 += 1;
            kernel6 += 1;
            kernel7 += 1;
        }
    }
#endif // __ARM_NEON && __aarch64__
    // blocks of 4 output channels
    for (; p+3<outch; p+=4)
    {
        const float* kernel0 = kernel + (p+0)*inch;
        const float* kernel1 = kernel + (p+1)*inch;
        const float* kernel2 = kernel + (p+2)*inch;
        const float* kernel3 = kernel + (p+3)*inch;

#if __ARM_NEON && __aarch64__
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4);
#else
        float* ktmp = kernel_tm.channel(p/4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch; q++)
        {
            // kernel0...3 0
            ktmp[0] = kernel0[0];
            ktmp[1] = kernel1[0];
            ktmp[2] = kernel2[0];
            ktmp[3] = kernel3[0];
            ktmp += 4;

            kernel0 += 1;
            kernel1 += 1;
            kernel2 += 1;
            kernel3 += 1;
        }
    }
    // remaining single output channels
    for (; p<outch; p++)
    {
        const float* kernel0 = kernel + p*inch;

#if __ARM_NEON && __aarch64__
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
#else
        float* ktmp = kernel_tm.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch; q++)
        {
            ktmp[0] = kernel0[0];
            ktmp++;
            kernel0++;
        }
    }
}

// 1x1 stride-1 convolution as an SGEMM over the repacked kernel.
// (Definition continues beyond this chunk; the remainder is unchanged.)
static void conv1x1s1_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int size = w * h;

    const float* bias = _bias;

    // interleave
    Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 4u, opt.workspace_allocator);
    {
        int nn_size = size >> 3;
        int remain_size_start = nn_size << 3;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 8;

            const float* img0 = bottom_blob.channel(0);
            img0 += i;

            float* tmpptr = tmp.channel(i/8);

            for (int q=0; q<inch; q++)
            {
#if __ARM_NEON
#if __aarch64__
                vst1q_f32(tmpptr, vld1q_f32(img0));
                vst1q_f32(tmpptr+4, vld1q_f32(img0+4));
                tmpptr += 8;
                img0 += bottom_blob.cstep;
#else
                asm volatile(
                    "pld [%0, #256] \n"
                    "vld1.f32 {d0-d3}, [%0 :128] \n"
                    "vst1.f32 {d0-d3}, [%1 :128]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i/8 + (i%8)/4); for (int q=0; q<inch; q++) { #if __ARM_NEON #if __aarch64__ vst1q_f32(tmpptr, vld1q_f32(img0)); tmpptr += 4; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p+1); float* outptr2 = top_blob.channel(p+2); float* outptr3 = top_blob.channel(p+3); float* outptr4 = top_blob.channel(p+4); float* outptr5 = top_blob.channel(p+5); float* 
outptr6 = top_blob.channel(p+6); float* outptr7 = top_blob.channel(p+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p : zeros; int i = 0; for (; i+7<size; i+=8) { const float* tmpptr = tmp.channel(i/8); const float* kptr = kernel.channel(p/8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n" "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n" "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n" "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n" "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n" "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n" "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n" "dup v31.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" "fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v26.4s, v10.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla 
v30.4s, v10.4s, v3.s[3] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v26.4s, v12.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla 
v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" "st1 {v24.4s, v25.4s}, [%4], #32 \n" "st1 {v26.4s, v27.4s}, [%5], #32 \n" "st1 {v28.4s, v29.4s}, [%6], #32 \n" "st1 {v30.4s, v31.4s}, [%7], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+3<size; i+=4) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4); const float* kptr = kernel.channel(p/8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[1] \n" "dup v18.4s, v0.s[2] \n" "dup v19.4s, v0.s[3] \n" "dup v20.4s, v1.s[0] \n" "dup v21.4s, v1.s[1] \n" "dup v22.4s, v1.s[2] \n" "dup v23.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "prfm pldl1keep, 
[%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "st1 {v20.4s}, [%4], #16 \n" "st1 {v21.4s}, [%5], #16 \n" "st1 {v22.4s}, [%6], #16 \n" "st1 {v23.4s}, [%7], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), 
"8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i<size; i++) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); const float* kptr = kernel.channel(p/8); asm volatile( "ld1 {v24.4s, v25.4s}, [%20] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v0.4s, v8.s[0] \n" "fmla v17.4s, v1.4s, v8.s[0] \n" "fmla v18.4s, v2.4s, v8.s[1] \n" "fmla v19.4s, v3.4s, v8.s[1] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "subs w4, w4, #1 \n" "fmla v20.4s, v4.4s, v8.s[2] \n" "fmla v21.4s, v5.4s, v8.s[2] \n" "fmla v22.4s, v6.4s, v8.s[3] \n" "fmla v23.4s, v7.4s, v8.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v24.4s, v24.4s, v16.4s \n" "fadd v25.4s, v25.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #32] \n" "ld1r {v8.4s}, [%8], #4 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v0.4s \n" "fmla v25.4s, v8.4s, v1.4s \n" "bne 2b \n" "3: \n" "st1 {v24.s}[0],[%0], #4 \n" "st1 {v24.s}[1],[%1], #4 \n" "st1 {v24.s}[2],[%2], #4 \n" "st1 {v24.s}[3],[%3], #4 \n" "st1 
{v25.s}[0],[%4], #4 \n" "st1 {v25.s}[1],[%5], #4 \n" "st1 {v25.s}[2],[%6], #4 \n" "st1 {v25.s}[3],[%7], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25" ); } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p+1); float* outptr2 = top_blob.channel(p+2); float* outptr3 = top_blob.channel(p+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i+7<size; i+=8) { const float* tmpptr = tmp.channel(i/8); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4); #else const float* kptr = kernel.channel(p/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[0] \n" "dup v10.4s, v0.s[1] \n" "dup v11.4s, v0.s[1] \n" "dup v12.4s, v0.s[2] \n" "dup v13.4s, v0.s[2] \n" "dup v14.4s, v0.s[3] \n" "dup v15.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v16.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" "fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, 
#3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "st1 {v12.4s, v13.4s}, [%2], #32 \n" "st1 {v14.4s, v15.4s}, [%3], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 q15, d1[1] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" "vst1.f32 {d24-d27}, [%2 :128]! \n" "vst1.f32 {d28-d31}, [%3 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum0_4 = biasptr[0]; float sum0_5 = biasptr[0]; float sum0_6 = biasptr[0]; float sum0_7 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum1_4 = biasptr[1]; float sum1_5 = biasptr[1]; float sum1_6 = biasptr[1]; float sum1_7 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum2_4 = biasptr[2]; float sum2_5 = biasptr[2]; float sum2_6 = biasptr[2]; float sum2_7 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; float sum3_4 = biasptr[3]; float sum3_5 = biasptr[3]; float sum3_6 = biasptr[3]; float sum3_7 = biasptr[3]; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * 
kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4); #else const float* kptr = kernel.channel(p/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[1] \n" "dup v10.4s, v0.s[2] \n" "dup v11.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "subs w4, w4, 
#1 \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v10.4s}, [%2], #16 \n" "st1 {v11.4s}, [%3], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[1] \n" "vdup.f32 q10, d1[0] \n" "vdup.f32 q11, d1[1] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4); #else const float* kptr = kernel.channel(p/4); #endif // __ARM_NEON 
&& __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v12.4s}, [%12] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v12.4s, v12.4s, v8.4s \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #32] \n" "ld1r {v4.4s}, [%4], #4 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "subs w4, w4, #1 \n" "fmla v12.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v12.s}[0], [%0], #4 \n" "st1 {v12.s}[1], [%1], #4 \n" "st1 {v12.s}[2], [%2], #4 \n" "st1 {v12.s}[3], [%3], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12" ); #else // __aarch64__ asm volatile( "vld1.f32 {d24-d25}, [%12] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "subs r4, r4, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #32] \n" "vld1.f32 {d8[],d9[]}, [%4]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d24[0]}, [%0]! \n" "vst1.f32 {d24[1]}, [%1]! \n" "vst1.f32 {d25[0]}, [%2]! \n" "vst1.f32 {d25[1]}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float* outptr0 = out0; int i = 0; for (; i+7<size; i+=8) { const float* tmpptr = tmp.channel(i/8); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const float* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" "dup v9.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4s, v5.4s}, [%1], #32 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15" ); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" "vdup.f32 q9, %6 \n" // inch loop "lsr r4, %7, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" // "vld1.f32 {d24-d27}, [%1 :128]! \n" // "vld1.f32 {d28-d31}, [%1 :128]! \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; float sum4 = bias0; float sum5 = bias0; float sum6 = bias0; float sum7 = bias0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const float* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2 "cmp w4, 
#0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4s}, [%1], #16 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8" ); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" // inch loop "lsr r4, %7, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #128] \n" "vld1.f32 {d8-d9}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8" ); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const float* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ int q = 0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q+3<inch; q+=4) { float32x4_t _p0 = vld1q_f32(tmpptr); tmpptr += 4; float32x4_t _k0 = vld1q_f32(kptr); kptr += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const float bias0 = bias ? 
bias[p] : 0.f; // // float* outptr0 = out0; // // for (int i=0; i<size; i++) // { // float sum = bias0; // // const float* kptr = _kernel.channel(p/8 + p%8); // // for (int q=0; q<inch; q++) // { // const float* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); Mat out6 = top_blob.channel(p+6); Mat out7 = top_blob.channel(p+7); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? bias[p+3] : 0.f; const float bias4 = bias ? bias[p+4] : 0.f; const float bias5 = bias ? bias[p+5] : 0.f; const float bias6 = bias ? bias[p+6] : 0.f; const float bias7 = bias ? 
bias[p+7] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); out6.fill(bias6); out7.fill(bias7); int q = 0; for (; q+7<inch; q+=8) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* img4 = bottom_blob.channel(q+4); const float* img5 = bottom_blob.channel(q+5); const float* img6 = bottom_blob.channel(q+6); const float* img7 = bottom_blob.channel(q+7); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* kernel4 = kernel + (p+4)*inch + q; const float* kernel5 = kernel + (p+5)*inch + q; const float* kernel6 = kernel + (p+6)*inch + q; const float* kernel7 = kernel + (p+7)*inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; const float* r4 = img4; const float* r5 = img5; const float* r6 = img6; const float* r7 = img7; int size = outw * outh; int nn = size >> 2; int remain = size & 3; float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); float32x4_t _k4 = vld1q_f32(kernel4); float32x4_t _k5 = vld1q_f32(kernel5); float32x4_t _k6 = vld1q_f32(kernel6); float32x4_t _k7 = vld1q_f32(kernel7); float32x4_t _k0n = vld1q_f32(kernel0+4); float32x4_t _k1n = vld1q_f32(kernel1+4); float32x4_t _k2n = vld1q_f32(kernel2+4); float32x4_t _k3n = vld1q_f32(kernel3+4); float32x4_t _k4n = vld1q_f32(kernel4+4); float32x4_t _k5n = vld1q_f32(kernel5+4); float32x4_t _k6n = vld1q_f32(kernel6+4); float32x4_t _k7n = 
vld1q_f32(kernel7+4); #ifdef __clang__ // gcc reject over 30 oprands :( if (nn > 0) { asm volatile( "prfm pldl1keep, [%9, #128] \n" "ld1 {v17.4s}, [%9], #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "0: \n" "fmla v18.4s, v17.4s, %34.s[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v20.4s}, [%3] \n" "fmla v19.4s, v17.4s, %35.s[0] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v21.4s}, [%4] \n" "fmla v20.4s, v17.4s, %36.s[0] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v22.4s}, [%5] \n" "fmla v21.4s, v17.4s, %37.s[0] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v23.4s}, [%6] \n" "fmla v22.4s, v17.4s, %38.s[0] \n" "prfm pldl1keep, [%10, #128] \n" "ld1 {v16.4s}, [%10], #16 \n" "fmla v23.4s, v17.4s, %39.s[0] \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v24.4s}, [%7] \n" "fmla v18.4s, v16.4s, %34.s[1] \n" "fmla v19.4s, v16.4s, %35.s[1] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v25.4s}, [%8] \n" "fmla v24.4s, v17.4s, %40.s[0] \n" "fmla v25.4s, v17.4s, %41.s[0] \n" "fmla v20.4s, v16.4s, %36.s[1] \n" "fmla v21.4s, v16.4s, %37.s[1] \n" "prfm pldl1keep, [%11, #128] \n" "ld1 {v17.4s}, [%11], #16 \n" "fmla v22.4s, v16.4s, %38.s[1] \n" "fmla v23.4s, v16.4s, %39.s[1] \n" "fmla v18.4s, v17.4s, %34.s[2] \n" "fmla v19.4s, v17.4s, %35.s[2] \n" "fmla v24.4s, v16.4s, %40.s[1] \n" "fmla v25.4s, v16.4s, %41.s[1] \n" "fmla v20.4s, v17.4s, %36.s[2] \n" "fmla v21.4s, v17.4s, %37.s[2] \n" "prfm pldl1keep, [%12, #128] \n" "ld1 {v16.4s}, [%12], #16 \n" "fmla v22.4s, v17.4s, %38.s[2] \n" "fmla v23.4s, v17.4s, %39.s[2] \n" "fmla v18.4s, v16.4s, %34.s[3] \n" "fmla v19.4s, v16.4s, %35.s[3] \n" "fmla v24.4s, v17.4s, %40.s[2] \n" "fmla v25.4s, v17.4s, %41.s[2] \n" "fmla v20.4s, v16.4s, %36.s[3] \n" "fmla v21.4s, v16.4s, %37.s[3] \n" "prfm pldl1keep, [%13, #128] \n" "ld1 {v17.4s}, [%13], #16 \n" "fmla v22.4s, v16.4s, %38.s[3] \n" "fmla v23.4s, v16.4s, %39.s[3] \n" "fmla v18.4s, v17.4s, %42.s[0] \n" "fmla v19.4s, v17.4s, %43.s[0] \n" "fmla 
v24.4s, v16.4s, %40.s[3] \n" "fmla v25.4s, v16.4s, %41.s[3] \n" "fmla v20.4s, v17.4s, %44.s[0] \n" "fmla v21.4s, v17.4s, %45.s[0] \n" "prfm pldl1keep, [%14, #128] \n" "ld1 {v16.4s}, [%14], #16 \n" "fmla v22.4s, v17.4s, %46.s[0] \n" "fmla v23.4s, v17.4s, %47.s[0] \n" "fmla v18.4s, v16.4s, %42.s[1] \n" "fmla v19.4s, v16.4s, %43.s[1] \n" "fmla v24.4s, v17.4s, %48.s[0] \n" "fmla v25.4s, v17.4s, %49.s[0] \n" "fmla v20.4s, v16.4s, %44.s[1] \n" "fmla v21.4s, v16.4s, %45.s[1] \n" "prfm pldl1keep, [%15, #128] \n" "ld1 {v17.4s}, [%15], #16 \n" "fmla v22.4s, v16.4s, %46.s[1] \n" "fmla v23.4s, v16.4s, %47.s[1] \n" "fmla v18.4s, v17.4s, %42.s[2] \n" "fmla v19.4s, v17.4s, %43.s[2] \n" "fmla v24.4s, v16.4s, %48.s[1] \n" "fmla v25.4s, v16.4s, %49.s[1] \n" "fmla v20.4s, v17.4s, %44.s[2] \n" "fmla v21.4s, v17.4s, %45.s[2] \n" "prfm pldl1keep, [%16, #128] \n" "ld1 {v16.4s}, [%16], #16 \n" "fmla v22.4s, v17.4s, %46.s[2] \n" "fmla v23.4s, v17.4s, %47.s[2] \n" "fmla v18.4s, v16.4s, %42.s[3] \n" "fmla v19.4s, v16.4s, %43.s[3] \n" "fmla v24.4s, v17.4s, %48.s[2] \n" "fmla v25.4s, v17.4s, %49.s[2] \n" "fmla v20.4s, v16.4s, %44.s[3] \n" "fmla v21.4s, v16.4s, %45.s[3] \n" "st1 {v18.4s}, [%1], #16 \n" "fmla v22.4s, v16.4s, %46.s[3] \n" "st1 {v19.4s}, [%2], #16 \n" "fmla v23.4s, v16.4s, %47.s[3] \n" "st1 {v20.4s}, [%3], #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v17.4s}, [%9], #16 \n" "fmla v24.4s, v16.4s, %48.s[3] \n" "st1 {v21.4s}, [%4], #16 \n" "fmla v25.4s, v16.4s, %49.s[3] \n" "st1 {v22.4s}, [%5], #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "st1 {v23.4s}, [%6], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "st1 {v24.4s}, [%7], #16 \n" "subs %w0, %w0, #1 \n" "st1 {v25.4s}, [%8], #16 \n" "bne 0b \n" "sub %9, %9, #16 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(outptr4),// %5 "=r"(outptr5),// %6 "=r"(outptr6),// %7 "=r"(outptr7),// %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 
"=r"(r3), // %12 "=r"(r4), // %13 "=r"(r5), // %14 "=r"(r6), // %15 "=r"(r7) // %16 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(r3), "13"(r4), "14"(r5), "15"(r6), "16"(r7), "w"(_k0), // %34 "w"(_k1), // %35 "w"(_k2), // %36 "w"(_k3), // %37 "w"(_k4), // %38 "w"(_k5), // %39 "w"(_k6), // %40 "w"(_k7), // %41 "w"(_k0n), // %42 "w"(_k1n), // %43 "w"(_k2n), // %44 "w"(_k3n), // %45 "w"(_k4n), // %46 "w"(_k5n), // %47 "w"(_k6n), // %48 "w"(_k7n) // %49 : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"//, "v26", "v27", "v28", "v29", "v30", "v31" ); } #else for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _out0p = vld1q_f32(outptr0); float32x4_t _out1p = vld1q_f32(outptr1); float32x4_t _out2p = vld1q_f32(outptr2); float32x4_t _out3p = vld1q_f32(outptr3); float32x4_t _out4p = vld1q_f32(outptr4); float32x4_t _out5p = vld1q_f32(outptr5); float32x4_t _out6p = vld1q_f32(outptr6); float32x4_t _out7p = vld1q_f32(outptr7); _out0p = vfmaq_laneq_f32(_out0p, _p, _k0, 0); _out1p = vfmaq_laneq_f32(_out1p, _p, _k1, 0); _out2p = vfmaq_laneq_f32(_out2p, _p, _k2, 0); _out3p = vfmaq_laneq_f32(_out3p, _p, _k3, 0); _out4p = vfmaq_laneq_f32(_out4p, _p, _k4, 0); _out5p = vfmaq_laneq_f32(_out5p, _p, _k5, 0); _out6p = vfmaq_laneq_f32(_out6p, _p, _k6, 0); _out7p = vfmaq_laneq_f32(_out7p, _p, _k7, 0); float32x4_t _p1 = vld1q_f32(r1); _out0p = vfmaq_laneq_f32(_out0p, _p1, _k0, 1); _out1p = vfmaq_laneq_f32(_out1p, _p1, _k1, 1); _out2p = vfmaq_laneq_f32(_out2p, _p1, _k2, 1); _out3p = vfmaq_laneq_f32(_out3p, _p1, _k3, 1); _out4p = vfmaq_laneq_f32(_out4p, _p1, _k4, 1); _out5p = vfmaq_laneq_f32(_out5p, _p1, _k5, 1); _out6p = vfmaq_laneq_f32(_out6p, _p1, _k6, 1); _out7p = vfmaq_laneq_f32(_out7p, _p1, _k7, 1); float32x4_t _p2 = vld1q_f32(r2); _out0p = vfmaq_laneq_f32(_out0p, _p2, _k0, 2); _out1p = vfmaq_laneq_f32(_out1p, _p2, _k1, 2); _out2p 
= vfmaq_laneq_f32(_out2p, _p2, _k2, 2); _out3p = vfmaq_laneq_f32(_out3p, _p2, _k3, 2); _out4p = vfmaq_laneq_f32(_out4p, _p2, _k4, 2); _out5p = vfmaq_laneq_f32(_out5p, _p2, _k5, 2); _out6p = vfmaq_laneq_f32(_out6p, _p2, _k6, 2); _out7p = vfmaq_laneq_f32(_out7p, _p2, _k7, 2); float32x4_t _p3 = vld1q_f32(r3); _out0p = vfmaq_laneq_f32(_out0p, _p3, _k0, 3); _out1p = vfmaq_laneq_f32(_out1p, _p3, _k1, 3); _out2p = vfmaq_laneq_f32(_out2p, _p3, _k2, 3); _out3p = vfmaq_laneq_f32(_out3p, _p3, _k3, 3); _out4p = vfmaq_laneq_f32(_out4p, _p3, _k4, 3); _out5p = vfmaq_laneq_f32(_out5p, _p3, _k5, 3); _out6p = vfmaq_laneq_f32(_out6p, _p3, _k6, 3); _out7p = vfmaq_laneq_f32(_out7p, _p3, _k7, 3); float32x4_t _p4 = vld1q_f32(r4); _out0p = vfmaq_laneq_f32(_out0p, _p4, _k0n, 0); _out1p = vfmaq_laneq_f32(_out1p, _p4, _k1n, 0); _out2p = vfmaq_laneq_f32(_out2p, _p4, _k2n, 0); _out3p = vfmaq_laneq_f32(_out3p, _p4, _k3n, 0); _out4p = vfmaq_laneq_f32(_out4p, _p4, _k4n, 0); _out5p = vfmaq_laneq_f32(_out5p, _p4, _k5n, 0); _out6p = vfmaq_laneq_f32(_out6p, _p4, _k6n, 0); _out7p = vfmaq_laneq_f32(_out7p, _p4, _k7n, 0); float32x4_t _p5 = vld1q_f32(r5); _out0p = vfmaq_laneq_f32(_out0p, _p5, _k0n, 1); _out1p = vfmaq_laneq_f32(_out1p, _p5, _k1n, 1); _out2p = vfmaq_laneq_f32(_out2p, _p5, _k2n, 1); _out3p = vfmaq_laneq_f32(_out3p, _p5, _k3n, 1); _out4p = vfmaq_laneq_f32(_out4p, _p5, _k4n, 1); _out5p = vfmaq_laneq_f32(_out5p, _p5, _k5n, 1); _out6p = vfmaq_laneq_f32(_out6p, _p5, _k6n, 1); _out7p = vfmaq_laneq_f32(_out7p, _p5, _k7n, 1); float32x4_t _p6 = vld1q_f32(r6); _out0p = vfmaq_laneq_f32(_out0p, _p6, _k0n, 2); _out1p = vfmaq_laneq_f32(_out1p, _p6, _k1n, 2); _out2p = vfmaq_laneq_f32(_out2p, _p6, _k2n, 2); _out3p = vfmaq_laneq_f32(_out3p, _p6, _k3n, 2); _out4p = vfmaq_laneq_f32(_out4p, _p6, _k4n, 2); _out5p = vfmaq_laneq_f32(_out5p, _p6, _k5n, 2); _out6p = vfmaq_laneq_f32(_out6p, _p6, _k6n, 2); _out7p = vfmaq_laneq_f32(_out7p, _p6, _k7n, 2); float32x4_t _p7 = vld1q_f32(r7); _out0p = 
vfmaq_laneq_f32(_out0p, _p7, _k0n, 3); _out1p = vfmaq_laneq_f32(_out1p, _p7, _k1n, 3); _out2p = vfmaq_laneq_f32(_out2p, _p7, _k2n, 3); _out3p = vfmaq_laneq_f32(_out3p, _p7, _k3n, 3); _out4p = vfmaq_laneq_f32(_out4p, _p7, _k4n, 3); _out5p = vfmaq_laneq_f32(_out5p, _p7, _k5n, 3); _out6p = vfmaq_laneq_f32(_out6p, _p7, _k6n, 3); _out7p = vfmaq_laneq_f32(_out7p, _p7, _k7n, 3); vst1q_f32(outptr0, _out0p); vst1q_f32(outptr1, _out1p); vst1q_f32(outptr2, _out2p); vst1q_f32(outptr3, _out3p); vst1q_f32(outptr4, _out4p); vst1q_f32(outptr5, _out5p); vst1q_f32(outptr6, _out6p); vst1q_f32(outptr7, _out7p); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; r6 += 4; r7 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; outptr4 += 4; outptr5 += 4; outptr6 += 4; outptr7 += 4; } #endif for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3] + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3] + *r4 * kernel1[4] + *r5 * kernel1[5] + *r6 * kernel1[6] + *r7 * kernel1[7]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3] + *r4 * kernel2[4] + *r5 * kernel2[5] + *r6 * kernel2[6] + *r7 * kernel2[7]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3] + *r4 * kernel3[4] + *r5 * kernel3[5] + *r6 * kernel3[6] + *r7 * kernel3[7]; float sum4 = *r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3] + *r4 * kernel4[4] + *r5 * kernel4[5] + *r6 * kernel4[6] + *r7 * kernel4[7]; float sum5 = *r0 * kernel5[0] + *r1 * kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3] + *r4 * kernel5[4] + *r5 * kernel5[5] + *r6 * kernel5[6] + *r7 * kernel5[7]; float sum6 = *r0 * kernel6[0] + *r1 * kernel6[1] + *r2 * kernel6[2] + *r3 * kernel6[3] + *r4 * kernel6[4] + *r5 * kernel6[5] + *r6 * kernel6[6] + *r7 * kernel6[7]; float 
sum7 = *r0 * kernel7[0] + *r1 * kernel7[1] + *r2 * kernel7[2] + *r3 * kernel7[3] + *r4 * kernel7[4] + *r5 * kernel7[5] + *r6 * kernel7[6] + *r7 * kernel7[7]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; r0++; r1++; r2++; r3++; r4++; r5++; r6++; r7++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; } } for (; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* kernel4 = kernel + (p+4)*inch + q; const float* kernel5 = kernel + (p+5)*inch + q; const float* kernel6 = kernel + (p+6)*inch + q; const float* kernel7 = kernel + (p+7)*inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float k4 = kernel4[0]; const float k5 = kernel5[0]; const float k6 = kernel6[0]; const float k7 = kernel7[0]; const float* r0 = img0; int size = outw * outh; int nn = size >> 2; int remain = size & 3; float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); float32x4_t _k4 = vdupq_n_f32(k4); float32x4_t _k5 = vdupq_n_f32(k5); float32x4_t _k6 = vdupq_n_f32(k6); float32x4_t _k7 = vdupq_n_f32(k7); for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _out0p = vld1q_f32(outptr0); float32x4_t _out1p = vld1q_f32(outptr1); float32x4_t _out2p = vld1q_f32(outptr2); float32x4_t _out3p = vld1q_f32(outptr3); float32x4_t _out4p = vld1q_f32(outptr4); float32x4_t _out5p = vld1q_f32(outptr5); float32x4_t _out6p = 
vld1q_f32(outptr6); float32x4_t _out7p = vld1q_f32(outptr7); _out0p = vfmaq_f32(_out0p, _p, _k0); _out1p = vfmaq_f32(_out1p, _p, _k1); _out2p = vfmaq_f32(_out2p, _p, _k2); _out3p = vfmaq_f32(_out3p, _p, _k3); _out4p = vfmaq_f32(_out4p, _p, _k4); _out5p = vfmaq_f32(_out5p, _p, _k5); _out6p = vfmaq_f32(_out6p, _p, _k6); _out7p = vfmaq_f32(_out7p, _p, _k7); vst1q_f32(outptr0, _out0p); vst1q_f32(outptr1, _out1p); vst1q_f32(outptr2, _out2p); vst1q_f32(outptr3, _out3p); vst1q_f32(outptr4, _out4p); vst1q_f32(outptr5, _out5p); vst1q_f32(outptr6, _out6p); vst1q_f32(outptr7, _out7p); r0 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; outptr4 += 4; outptr5 += 4; outptr6 += 4; outptr7 += 4; } for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; float sum4 = *r0 * k4; float sum5 = *r0 * k5; float sum6 = *r0 * k6; float sum7 = *r0 * k7; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; r0++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; } } } #else nn_outch = outch / 6; remain_outch_start = nn_outch * 6; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 6; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? bias[p+3] : 0.f; const float bias4 = bias ? bias[p+4] : 0.f; const float bias5 = bias ? 
bias[p+5] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); int q = 0; for (; q+3<inch; q+=4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* kernel4 = kernel + (p+4)*inch + q; const float* kernel5 = kernel + (p+5)*inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 2; int remain = size & 3; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); float32x4_t _k4 = vld1q_f32(kernel4); float32x4_t _k5 = vld1q_f32(kernel5); if (nn > 0) { asm volatile( "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n"// q12 = r0 "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n"// q6 = outptr0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n"// q7 = outptr1 "vmla.f32 q6, q12, %e22[0] \n" "0: \n" "pld [%3, #128] \n" "vld1.f32 {d16-d17}, [%3 :128] \n"// q8 = outptr2 "vmla.f32 q7, q12, %e23[0] \n" "pld [%4, #128] \n" "vld1.f32 {d18-d19}, [%4 :128] \n"// q9 = outptr3 "vmla.f32 q8, q12, %e24[0] \n" "pld [%8, #128] \n" "vld1.f32 {d26-d27}, [%8 :128]! 
\n"// q13 = r1 "vmla.f32 q9, q12, %e25[0] \n" "pld [%5, #128] \n" "vld1.f32 {d20-d21}, [%5 :128] \n"// q10 = outptr4 "vmla.f32 q6, q13, %e22[1] \n" "vmla.f32 q7, q13, %e23[1] \n" "pld [%6, #128] \n" "vld1.f32 {d22-d23}, [%6 :128] \n"// q11 = outptr5 "vmla.f32 q10, q12, %e26[0] \n" "vmla.f32 q11, q12, %e27[0] \n" "vmla.f32 q8, q13, %e24[1] \n" "vmla.f32 q9, q13, %e25[1] \n" "pld [%9, #128] \n" "vld1.f32 {d28-d29}, [%9 :128]! \n"// q14 = r2 "vmla.f32 q10, q13, %e26[1] \n" "vmla.f32 q11, q13, %e27[1] \n" "vmla.f32 q6, q14, %f22[0] \n" "vmla.f32 q7, q14, %f23[0] \n" "vmla.f32 q8, q14, %f24[0] \n" "vmla.f32 q9, q14, %f25[0] \n" "pld [%10, #128] \n" "vld1.f32 {d30-d31}, [%10 :128]! \n"// q15 = r3 "vmla.f32 q10, q14, %f26[0] \n" "vmla.f32 q11, q14, %f27[0] \n" "vmla.f32 q6, q15, %f22[1] \n" "vmla.f32 q7, q15, %f23[1] \n" "vmla.f32 q8, q15, %f24[1] \n" "vmla.f32 q9, q15, %f25[1] \n" "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n"// q12 = r0 "vmla.f32 q10, q15, %f26[1] \n" "vmla.f32 q11, q15, %f27[1] \n" "vst1.f32 {d12-d13}, [%1 :128]! \n" "vst1.f32 {d14-d15}, [%2 :128]! \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n"// q6 = outptr0 "vst1.f32 {d16-d17}, [%3 :128]! \n" "vst1.f32 {d18-d19}, [%4 :128]! \n" "vmla.f32 q6, q12, %e22[0] \n" "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n"// q7 = outptr1 "subs %0, #1 \n" "vst1.f32 {d20-d21}, [%5 :128]! \n" "vst1.f32 {d22-d23}, [%6 :128]! 
\n" "bne 0b \n" "sub %7, #16 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(outptr4),// %5 "=r"(outptr5),// %6 "=r"(r0), // %7 "=r"(r1), // %8 "=r"(r2), // %9 "=r"(r3) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(r0), "8"(r1), "9"(r2), "10"(r3), "w"(_k0), // %22 "w"(_k1), // %23 "w"(_k2), // %24 "w"(_k3), // %25 "w"(_k4), // %26 "w"(_k5) // %27 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; float sum4 = *r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3]; float sum5 = *r0 * kernel5[0] + *r1 * kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; } } for (; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* kernel4 = kernel + (p+4)*inch + q; const float* kernel5 = kernel + (p+5)*inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float k4 = kernel4[0]; const 
float k5 = kernel5[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 2; int remain = size & 3; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); float32x4_t _k4 = vdupq_n_f32(k4); float32x4_t _k5 = vdupq_n_f32(k5); if (nn > 0) { asm volatile( "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n"// q12 = r0 "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n"// q6 = outptr0 "0: \n" "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n"// q7 = outptr1 "vmla.f32 q6, q12, %q16 \n" "pld [%3, #128] \n" "vld1.f32 {d16-d17}, [%3 :128] \n"// q8 = outptr2 "vmla.f32 q7, q12, %q17 \n" "pld [%4, #128] \n" "vld1.f32 {d18-d19}, [%4 :128] \n"// q9 = outptr3 "vmla.f32 q8, q12, %q18 \n" "pld [%5, #128] \n" "vld1.f32 {d20-d21}, [%5 :128] \n"// q10 = outptr4 "vmla.f32 q9, q12, %q19 \n" "pld [%6, #128] \n" "vld1.f32 {d22-d23}, [%6 :128] \n"// q11 = outptr5 "vmla.f32 q10, q12, %q20 \n" "vmla.f32 q11, q12, %q21 \n" "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n"// q12 = r0 "vst1.f32 {d12-d13}, [%1 :128]! \n" "vst1.f32 {d14-d15}, [%2 :128]! \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n"// q6 = outptr0 "vst1.f32 {d16-d17}, [%3 :128]! \n" "vst1.f32 {d18-d19}, [%4 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d20-d21}, [%5 :128]! \n" "vst1.f32 {d22-d23}, [%6 :128]! 
\n" "bne 0b \n" "sub %7, #16 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(outptr4),// %5 "=r"(outptr5),// %6 "=r"(r0) // %7 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(r0), "w"(_k0), // %16 "w"(_k1), // %17 "w"(_k2), // %18 "w"(_k3), // %19 "w"(_k4), // %20 "w"(_k5) // %21 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); } #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; float sum4 = *r0 * k4; float sum5 = *r0 * k5; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; r0++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; } } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? 
bias[p+3] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); int q = 0; for (; q+3<inch; q+=4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "0: \n" "fmla v8.4s, v6.4s, %18.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v9.4s, v7.4s, %18.s[0] \n" "fmla v10.4s, v6.4s, %19.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v11.4s, v7.4s, %19.s[0] \n" "fmla v12.4s, v6.4s, %20.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "fmla v13.4s, v7.4s, %20.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4s, v5.4s}, [%6], #32 \n" "fmla v14.4s, v6.4s, %21.s[0] \n" "fmla v15.4s, v7.4s, %21.s[0] \n" "fmla v8.4s, v4.4s, %18.s[1] \n" "fmla v9.4s, v5.4s, %18.s[1] \n" "fmla v10.4s, v4.4s, %19.s[1] \n" "fmla v11.4s, v5.4s, %19.s[1] \n" "fmla v12.4s, v4.4s, %20.s[1] \n" "fmla v13.4s, v5.4s, %20.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v6.4s, v7.4s}, [%7], #32 \n" "fmla v14.4s, v4.4s, %21.s[1] \n" "fmla 
v15.4s, v5.4s, %21.s[1] \n" "fmla v8.4s, v6.4s, %18.s[2] \n" "fmla v9.4s, v7.4s, %18.s[2] \n" "fmla v10.4s, v6.4s, %19.s[2] \n" "fmla v11.4s, v7.4s, %19.s[2] \n" "fmla v12.4s, v6.4s, %20.s[2] \n" "fmla v13.4s, v7.4s, %20.s[2] \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v4.4s, v5.4s}, [%8], #32 \n" "fmla v14.4s, v6.4s, %21.s[2] \n" "fmla v15.4s, v7.4s, %21.s[2] \n" "fmla v8.4s, v4.4s, %18.s[3] \n" "fmla v9.4s, v5.4s, %18.s[3] \n" "fmla v10.4s, v4.4s, %19.s[3] \n" "fmla v11.4s, v5.4s, %19.s[3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v4.4s, %20.s[3] \n" "fmla v13.4s, v5.4s, %20.s[3] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "fmla v14.4s, v4.4s, %21.s[3] \n" "fmla v15.4s, v5.4s, %21.s[3] \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" "sub %5, %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "0: \n" "vmla.f32 q8, q6, %e18[0] \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n" "vmla.f32 q9, q7, %e18[0] \n" "vmla.f32 q10, q6, %e19[0] \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128] \n" "vmla.f32 q11, q7, %e19[0] \n" "vmla.f32 q12, q6, %e20[0] \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128] \n" "vmla.f32 q13, q7, %e20[0] \n" "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]! 
\n" "vmla.f32 q14, q6, %e21[0] \n" "vmla.f32 q15, q7, %e21[0] \n" "vmla.f32 q8, q4, %e18[1] \n" "vmla.f32 q9, q5, %e18[1] \n" "vmla.f32 q10, q4, %e19[1] \n" "vmla.f32 q11, q5, %e19[1] \n" "vmla.f32 q12, q4, %e20[1] \n" "vmla.f32 q13, q5, %e20[1] \n" "pld [%7, #256] \n" "vld1.f32 {d12-d15}, [%7 :128]! \n" "vmla.f32 q14, q4, %e21[1] \n" "vmla.f32 q15, q5, %e21[1] \n" "vmla.f32 q8, q6, %f18[0] \n" "vmla.f32 q9, q7, %f18[0] \n" "vmla.f32 q10, q6, %f19[0] \n" "vmla.f32 q11, q7, %f19[0] \n" "vmla.f32 q12, q6, %f20[0] \n" "vmla.f32 q13, q7, %f20[0] \n" "pld [%8, #256] \n" "vld1.f32 {d8-d11}, [%8 :128]! \n" "vmla.f32 q14, q6, %f21[0] \n" "vmla.f32 q15, q7, %f21[0] \n" "vmla.f32 q8, q4, %f18[1] \n" "vmla.f32 q9, q5, %f18[1] \n" "vmla.f32 q10, q4, %f19[1] \n" "vmla.f32 q11, q5, %f19[1] \n" "vmla.f32 q12, q4, %f20[1] \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vmla.f32 q13, q5, %f20[1] \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "vmla.f32 q14, q4, %f21[1] \n" "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "vmla.f32 q15, q5, %f21[1] \n" "vst1.f32 {d24-d27}, [%3 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4 :128]! 
\n" "bne 0b \n" "sub %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr2++; outptr3++; } } for (; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] 
\n" "fmla v8.4s, v6.4s, %12.4s \n" "fmla v9.4s, v7.4s, %12.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v6.4s, %13.4s \n" "fmla v11.4s, v7.4s, %13.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v12.4s, v6.4s, %14.4s \n" "fmla v13.4s, v7.4s, %14.4s \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "fmla v14.4s, v6.4s, %15.4s \n" "fmla v15.4s, v7.4s, %15.4s \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" "sub %5, %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "vmla.f32 q8, q6, %q12 \n" "vmla.f32 q9, q7, %q12 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n" "vmla.f32 q10, q6, %q13 \n" "vmla.f32 q11, q7, %q13 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128] \n" "vmla.f32 q12, q6, %q14 \n" "vmla.f32 q13, q7, %q14 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128] \n" "vmla.f32 q14, q6, %q15 \n" "vmla.f32 q15, q7, %q15 \n" "vst1.f32 {d24-d27}, [%3 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4 :128]! 
\n" "bne 0b \n" "sub %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0++; outptr0++; outptr1++; outptr2++; outptr3++; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %12.4s \n" "fmla v1.4s, v3.4s, %12.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v2.4s, v3.4s}, [%3], 
#32 \n" "fmla v0.4s, v2.4s, %13.4s \n" "fmla v1.4s, v3.4s, %13.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %14.4s \n" "fmla v1.4s, v3.4s, %14.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4s, v3.4s}, [%5], #32 \n" "fmla v0.4s, v2.4s, %15.4s \n" "fmla v1.4s, v3.4s, %15.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v0", "v1", "v2", "v3" ); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q3, %q12 \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3 :128]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q3, %q13 \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4 :128]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q3, %q14 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q3, %q15 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %6.4s \n" "fmla v1.4s, v3.4s, %6.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "v0", "v1", "v2", "v3" ); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q3, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } } static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? 
bias[p+3] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); int q = 0; for (; q+3<inch; q+=4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { int size = outw; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld2 {v4.4s, v5.4s}, [%5], #32 \n" "ld2 {v6.4s, v7.4s}, [%5], #32 \n" "and v5.16b, v6.16b, v6.16b \n"// v4 v5 "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "fmla v8.4s, v4.4s, %18.s[0] \n" "fmla v9.4s, v5.4s, %18.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v4.4s, %19.s[0] \n" "fmla v11.4s, v5.4s, %19.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v12.4s, v4.4s, %20.s[0] \n" "fmla v13.4s, v5.4s, %20.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "prfm pldl1keep, [%6, #512] \n" "ld2 {v6.4s, v7.4s}, [%6], #32 \n" "fmla v14.4s, v4.4s, %21.s[0] \n" "fmla v15.4s, v5.4s, %21.s[0] \n" "ld2 {v4.4s, v5.4s}, [%6], #32 \n" "and v7.16b, v4.16b, v4.16b \n"// v6 v7 "fmla v8.4s, v6.4s, %18.s[1] \n" "fmla v9.4s, v7.4s, %18.s[1] \n" "fmla v10.4s, v6.4s, %19.s[1] \n" "fmla v11.4s, v7.4s, %19.s[1] \n" 
"fmla v12.4s, v6.4s, %20.s[1] \n" "fmla v13.4s, v7.4s, %20.s[1] \n" "prfm pldl1keep, [%7, #512] \n" "ld2 {v4.4s, v5.4s}, [%7], #32 \n" "fmla v14.4s, v6.4s, %21.s[1] \n" "fmla v15.4s, v7.4s, %21.s[1] \n" "ld2 {v6.4s, v7.4s}, [%7], #32 \n" "and v5.16b, v6.16b, v6.16b \n"// v4 v5 "fmla v8.4s, v4.4s, %18.s[2] \n" "fmla v9.4s, v5.4s, %18.s[2] \n" "fmla v10.4s, v4.4s, %19.s[2] \n" "fmla v11.4s, v5.4s, %19.s[2] \n" "fmla v12.4s, v4.4s, %20.s[2] \n" "fmla v13.4s, v5.4s, %20.s[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld2 {v6.4s, v7.4s}, [%8], #32 \n" "fmla v14.4s, v4.4s, %21.s[2] \n" "fmla v15.4s, v5.4s, %21.s[2] \n" "ld2 {v4.4s, v5.4s}, [%8], #32 \n" "and v7.16b, v4.16b, v4.16b \n"// v6 v7 "fmla v8.4s, v6.4s, %18.s[3] \n" "fmla v9.4s, v7.4s, %18.s[3] \n" "fmla v10.4s, v6.4s, %19.s[3] \n" "fmla v11.4s, v7.4s, %19.s[3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v6.4s, %20.s[3] \n" "fmla v13.4s, v7.4s, %20.s[3] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "fmla v14.4s, v6.4s, %21.s[3] \n" "fmla v15.4s, v7.4s, %21.s[3] \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "0: \n" "pld [%5, #512] \n" "vld2.f32 {d8-d11}, [%5]! \n" "vld2.f32 {d12-d15}, [%5]! 
\n" "vand q5, q6, q6 \n"// q4 q5 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1] \n" "vmla.f32 q8, q4, %e18[0] \n" "vmla.f32 q9, q5, %e18[0] \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" "vmla.f32 q10, q4, %e19[0] \n" "vmla.f32 q11, q5, %e19[0] \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3] \n" "vmla.f32 q12, q4, %e20[0] \n" "vmla.f32 q13, q5, %e20[0] \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" "pld [%6, #512] \n" "vld2.f32 {d12-d15}, [%6]! \n" "vmla.f32 q14, q4, %e21[0] \n" "vmla.f32 q15, q5, %e21[0] \n" "vld2.f32 {d8-d11}, [%6]! \n" "vand q7, q4, q4 \n"// q6 q7 "vmla.f32 q8, q6, %e18[1] \n" "vmla.f32 q9, q7, %e18[1] \n" "vmla.f32 q10, q6, %e19[1] \n" "vmla.f32 q11, q7, %e19[1] \n" "vmla.f32 q12, q6, %e20[1] \n" "vmla.f32 q13, q7, %e20[1] \n" "pld [%7, #512] \n" "vld2.f32 {d8-d11}, [%7]! \n" "vmla.f32 q14, q6, %e21[1] \n" "vmla.f32 q15, q7, %e21[1] \n" "vld2.f32 {d12-d15}, [%7]! \n" "vand q5, q6, q6 \n"// q4 q5 "vmla.f32 q8, q4, %f18[0] \n" "vmla.f32 q9, q5, %f18[0] \n" "vmla.f32 q10, q4, %f19[0] \n" "vmla.f32 q11, q5, %f19[0] \n" "vmla.f32 q12, q4, %f20[0] \n" "vmla.f32 q13, q5, %f20[0] \n" "pld [%8, #512] \n" "vld2.f32 {d12-d15}, [%8]! \n" "vmla.f32 q14, q4, %f21[0] \n" "vmla.f32 q15, q5, %f21[0] \n" "vld2.f32 {d8-d11}, [%8]! \n" "vand q7, q4, q4 \n"// q6 q7 "vmla.f32 q8, q6, %f18[1] \n" "vmla.f32 q9, q7, %f18[1] \n" "vmla.f32 q10, q6, %f19[1] \n" "vmla.f32 q11, q7, %f19[1] \n" "vst1.f32 {d16-d19}, [%1]! \n" "vmla.f32 q12, q6, %f20[1] \n" "vmla.f32 q13, q7, %f20[1] \n" "vst1.f32 {d20-d23}, [%2]! \n" "vmla.f32 q14, q6, %f21[1] \n" "vmla.f32 q15, q7, %f21[1] \n" "vst1.f32 {d24-d27}, [%3]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr0++; outptr1++; outptr2++; outptr3++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { int size = outw; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld2 
{v4.4s, v5.4s}, [%5], #32 \n" "ld2 {v6.4s, v7.4s}, [%5], #32 \n" "and v5.16b, v6.16b, v6.16b \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "fmla v8.4s, v4.4s, %12.4s \n" "fmla v9.4s, v5.4s, %12.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v4.4s, %13.4s \n" "fmla v11.4s, v5.4s, %13.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v4.4s, %14.4s \n" "fmla v13.4s, v5.4s, %14.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "fmla v14.4s, v4.4s, %15.4s \n" "fmla v15.4s, v5.4s, %15.4s \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "0: \n" "pld [%5, #512] \n" "vld2.f32 {d8-d11}, [%5]! \n" "vld2.f32 {d12-d15}, [%5]! \n" "vand q5, q6, q6 \n"// q4 q5 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1] \n" "vmla.f32 q8, q4, %q12 \n" "vmla.f32 q9, q5, %q12 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" "vmla.f32 q10, q4, %q13 \n" "vmla.f32 q11, q5, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3] \n" "vst1.f32 {d16-d19}, [%1]! \n" "vmla.f32 q12, q4, %q14 \n" "vmla.f32 q13, q5, %q14 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" "vst1.f32 {d20-d23}, [%2]! \n" "vmla.f32 q14, q4, %q15 \n" "vmla.f32 q15, q5, %q15 \n" "vst1.f32 {d24-d27}, [%3]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0 += 2; outptr0++; outptr1++; outptr2++; outptr3++; } r0 += tailstep; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %12.4s \n" "fmla v1.4s, v8.4s, %12.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld2 
{v2.4s, v3.4s}, [%3], #32 \n" "ld2 {v8.4s, v9.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %13.4s \n" "fmla v1.4s, v8.4s, %13.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "ld2 {v8.4s, v9.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %14.4s \n" "fmla v1.4s, v8.4s, %14.4s \n" "prfm pldl1keep, [%5, #512] \n" "ld2 {v2.4s, v3.4s}, [%5], #32 \n" "ld2 {v8.4s, v9.4s}, [%5], #32 \n" "fmla v0.4s, v2.4s, %15.4s \n" "fmla v1.4s, v8.4s, %15.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9" ); } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q8, %q12 \n" "pld [%3, #512] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vld2.f32 {d16-d19}, [%3]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q8, %q13 \n" "pld [%4, #512] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vld2.f32 {d16-d19}, [%4]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q8, %q14 \n" "pld [%5, #512] \n" "vld2.f32 {d4-d7}, [%5]! \n" "vld2.f32 {d16-d19}, [%5]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q8, %q15 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %6.4s \n" "fmla v1.4s, v8.4s, %6.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9" ); } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q8, %q6 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
naive_prefix_lcs.h
//
// Created by nikita on 30.07.2020.
//

#ifndef CPU_NAIVE_PREFIX_LCS_H
#define CPU_NAIVE_PREFIX_LCS_H

#include <vector>
#include <cmath>
#include <algorithm>  // std::max, std::min
#include <utility>    // std::swap

/**
 * Reference LCS: full (m+1) x (n+1) dynamic-programming table.
 *
 * @tparam Input element type of the sequences
 * @param a first sequence
 * @param b second sequence
 * @return length of the longest common subsequence of a and b
 */
template<class Input>
int naive_prefix_lcs(std::vector<Input> a, std::vector<Input> b) {
    auto m = a.size() + 1;
    auto n = b.size() + 1;
    // Heap-backed table instead of the original variable-length array:
    // VLAs are not standard C++ and overflow the stack for large inputs.
    std::vector<std::vector<int>> arr(m, std::vector<int>(n, 0));
    // arr[i][j] = LCS length of prefixes a[0..i) and b[0..j);
    // row 0 and column 0 stay zero (empty prefix).
    for (std::size_t i = 1; i < m; ++i) {
        for (std::size_t j = 1; j < n; ++j) {
            arr[i][j] = std::max(
                std::max(arr[i - 1][j], arr[i][j - 1]),
                (a[i - 1] == b[j - 1]) ? arr[i - 1][j - 1] + 1 : arr[i - 1][j - 1]);
        }
    }
    return arr[m - 1][n - 1];
}

/**
 * Row-rolling LCS: keeps only two rows of the DP table.
 *
 * @param a first sequence
 * @param a_size length of a
 * @param b second sequence
 * @param b_size length of b
 * @return length of the longest common subsequence of a and b
 */
int prefix_lcs_sequential(int *a, int a_size, int *b, int b_size) {
    int *input_a;
    int *input_b;
    int m, n;
    // Walk the longer sequence in the outer loop so the two rolling rows
    // are sized by the shorter one.  BUG FIX: the original compared the
    // pointers themselves (`a > b`), which is meaningless here; compare
    // the lengths instead (LCS is symmetric, so the result is unchanged).
    if (a_size > b_size) {
        m = a_size + 1;
        n = b_size + 1;
        input_a = a;
        input_b = b;
    } else {
        n = a_size + 1;
        m = b_size + 1;
        input_b = a;
        input_a = b;
    }
    auto prev_row = new int[n];
    auto cur_row = new int[n];
    for (int i = 0; i < n; ++i) {
        cur_row[i] = 0;
        prev_row[i] = 0;
    }
    for (int i = 1; i < m; ++i) {
        // l carries cur_row[j-1] (the cell to the left) through the row.
        auto l = 0;
        for (int j = 1; j < n; ++j) {
            cur_row[j] = std::max(
                std::max(prev_row[j], l),
                (input_a[i - 1] == input_b[j - 1]) ? prev_row[j - 1] + 1
                                                   : prev_row[j - 1]);
            l = cur_row[j];
        }
        std::swap(prev_row, cur_row);
    }
    int result = prev_row[n - 1];
    // BUG FIX: both rows were leaked in the original implementation.
    delete[] prev_row;
    delete[] cur_row;
    return result;
}

/**
 * Anti-diagonal ("skewed") LCS: processes the DP table diagonal by
 * diagonal using three rolling diagonals a1/a2/a3.  Behavior is the
 * original algorithm; only the buffer leak at the end is fixed.
 *
 * @param a first sequence
 * @param a_size length of a
 * @param b second sequence
 * @param b_size length of b
 * @return length of the longest common subsequence of a and b
 */
int prefix_lcs_sequential_skewed(int *a, int a_size, int *b, int b_size) {
    // Normalize so a is the shorter (or equal) sequence.
    if (a_size > b_size) {
        return prefix_lcs_sequential_skewed(b, b_size, a, a_size);
    }
    // check special case 2x2
    if (a_size == 1 && b_size == 1) {
        return a[0] == b[0] ? 1 : 0;
    }
    auto diagonal_size = 1 + std::min(a_size, b_size);
    auto a1 = new int[diagonal_size];
    auto a2 = new int[diagonal_size];
    auto a3 = new int[diagonal_size];
    auto pos_i = 0;
    auto pos_j = 0;
    auto start_i = pos_i;
    auto start_j = pos_j;
    auto min = std::min(a_size, b_size);
    auto num_diag = a_size + b_size;
    auto total_same_length_diag = num_diag - (min + 1) - min;

    // init step
    for (int k = 0; k < diagonal_size; ++k) {
        a3[k] = 0;
        a2[k] = 0;
        a1[k] = 0;
    }
    start_i--;
    // phase 1: fill the upper-left triangle (diagonals grow in length)
    for (int k = 2; k <= diagonal_size; ++k, start_i++) {
        pos_i = start_i;
        pos_j = 0;
        a3[0] = 0;
        a3[k - 1] = 0;
//#pragma omp simd
        for (int i = 1; i < k - 1; ++i) {
            a3[i] = std::max(
                std::max(a2[i], a2[i - 1]),
                (a[pos_i] == b[pos_j]) ? 1 + a1[i - 1] : a1[i - 1]);
            pos_i--;
            pos_j++;
        }
        std::swap(a1, a2);
        std::swap(a3, a2);
    }
    // phase 2: middle band of constant-length diagonals
    if (a_size >= b_size) {
        // same pattern
        for (int k = 0; k < total_same_length_diag; ++k, start_i++) {
            pos_i = start_i;
            pos_j = 0;
            a3[0] = 0;
            for (int i = 1; i < diagonal_size; ++i) {
                a3[i] = std::max(
                    std::max(a2[i], a2[i - 1]),
                    (a[pos_i] == b[pos_j]) ? 1 + a1[i - 1] : a1[i - 1]);
                pos_i--;
                pos_j++;
            }
            std::swap(a1, a2);
            std::swap(a3, a2);
        }
    }
    // special case when:
    // a==b => |a1| = c-1 , |a2| = c, |a3|= c-1 or
    // a>b  => |a1| = c, |a2| = c, |a3| = c-1
    // a<b  -> |a1| = c - 1, |a2| = c, |a3| = c
    pos_i = start_i;
    pos_j = 0;
    if (a_size < b_size) {
        a3[diagonal_size - 1] = 0;
    }
    for (int i = 0; i < diagonal_size - 1; ++i) {
        a3[i] = std::max(
            std::max(a2[i], a2[i + 1]),
            (a[pos_i] == b[pos_j]) ? 1 + a1[i] : a1[i]);
        pos_i--;
        pos_j++;
    }
    start_j++;
    std::swap(a1, a2);
    std::swap(a3, a2);
    if (a_size < b_size) {
        // since special case then -1
        for (int k = 0; k < total_same_length_diag; ++k, start_j++) {
            pos_i = start_i;
            pos_j = start_j;
            a3[diagonal_size - 1] = 0;
            for (int i = 0; i < diagonal_size - 1; ++i) {
                a3[i] = std::max(
                    std::max(a2[i], a2[i + 1]),
                    (a[pos_i] == b[pos_j]) ? 1 + a1[i + 1] : a1[i + 1]);
                pos_i--;
                pos_j++;
            }
            std::swap(a1, a2);
            std::swap(a3, a2);
        }
    }
    if (a_size >= b_size) diagonal_size -= 1;
    // phase 3: lower-right triangle (diagonals shrink)
    // pattern a3[i] = max(a1[i+1], a2[i], a2[i-1])
    for (int size = diagonal_size - 1; size > 1; size--, start_j++) {
        pos_i = start_i;
        pos_j = start_j;
        for (int i = 0; i < size; ++i) {
            a3[i] = std::max(
                std::max(a2[i], a2[i + 1]),
                (a[pos_i] == b[pos_j]) ? 1 + a1[i + 1] : a1[i + 1]);
            pos_i--;
            pos_j++;
        }
        std::swap(a1, a2);
        std::swap(a3, a2);
    }
    // need to calculate last one cell
    int result = std::max(std::max(a2[0], a2[1]),
                          (a[a_size - 1]) == b[b_size - 1] ? 1 + a1[1] : a1[1]);
    // BUG FIX: the three diagonals were leaked in the original.
    delete[] a1;
    delete[] a2;
    delete[] a3;
    return result;
}

#endif //CPU_NAIVE_PREFIX_LCS_H
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
laplace2d.c
/* * Copyright 2012 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <math.h> #include <string.h> #include <openacc.h> #include "timer.h" #define NN 4096 #define NM 4096 double A[NN][NM]; double Anew[NN][NM]; int main(int argc, char** argv) { const int n = NN; const int m = NM; const int iter_max = 1000; const double tol = 1.0e-6; double error = 1.0; memset(A, 0, n * m * sizeof(double)); memset(Anew, 0, n * m * sizeof(double)); for (int j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; while ( error > tol && iter < iter_max ) { error = 0.0; #pragma omp parallel for shared(m, n, Anew, A) for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmax( error, fabs(Anew[j][i] - A[j][i])); } } #pragma omp parallel for shared(m, n, Anew, A) for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000); }
taskdep_untied_threadid2.c
// RUN: %libomp-compile-and-run
// REQUIRES: abt
// Verifies OpenMP untied tasks with `depend' clauses on the BOLT/Argobots
// (ABT) runtime: each task records whether its Argobots ULT identity and
// its OpenMP thread id survive a taskyield and an explicit ABT yield.
#include "omp_testsuite.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Sequential reference: number of monotone lattice paths into cell
// (n-1, n-1) of an n x n grid, computed row by row (Pascal-style sums).
int calc_seq(int n) {
  int i, j, *buffer = (int *)malloc(sizeof(int) * n * n);
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      if (i == 0 && j == 0) {
        buffer[i * n + j] = 1;
      } else if (i == 0) {
        buffer[i * n + j] = buffer[i * n + (j - 1)];
      } else if (j == 0) {
        buffer[i * n + j] = buffer[(i - 1) * n + j];
      } else {
        buffer[i * n + j] = buffer[(i - 1) * n + j] + buffer[i * n + (j - 1)];
      }
    }
  }
  int ret = buffer[(n - 1) * n + (n - 1)];
  free(buffer);
  return ret;
}

// Executed inside every task body; expects `vals' and `abt_threads' arrays
// in scope.  Adds +1 to vals[val_index] if the task is still running on
// the same Argobots ULT after `omp taskyield', and +2 if it is still on
// the same OpenMP thread after ABT_thread_yield(), so a fully passing
// task leaves vals[val_index] == 3.
// (No comments inside the macro: a `//' comment would swallow the
// line-continuation backslashes.)
#define TASK_UNTIED_CHECK(val_index) \
  do { \
    ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_threads[(val_index)])); \
    \
    _Pragma("omp taskyield") \
    \
    ABT_thread abt_thread = abt_threads[(val_index)]; \
    int omp_thread_id2 = omp_get_thread_num(); \
    ABT_thread abt_thread2; \
    ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2)); \
    ABT_bool abt_thread_equal; \
    ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2, \
                                      &abt_thread_equal)); \
    if (abt_thread_equal == ABT_TRUE) { \
      vals[(val_index)] += 1; \
    } \
    \
    ABT_EXIT_IF_FAIL(ABT_thread_yield()); \
    \
    int omp_thread_id3 = omp_get_thread_num(); \
    if (omp_thread_id2 == omp_thread_id3) { \
      vals[(val_index)] += 2; \
    } \
  } while (0)

// Builds an n x n task dependence lattice (task (i,j) depends on (i-1,j)
// and (i,j-1)), runs it with `num_threads' threads, and checks both the
// computed path count against calc_seq and every task's untied-check
// score.  Returns 1 on success, 0 on failure.
int test_taskdep_untied_threadid2(int num_threads) {
  int n = 10;
  int seq_val, task_val;
  int vals[n * n];
  ABT_thread abt_threads[n * n];
  memset(vals, 0, sizeof(int) * n * n);
  // Only the master thread creates tasks; the team executes them.
#pragma omp parallel shared(task_val) firstprivate(n) num_threads(num_threads)
#pragma omp master
  {
    int i, j;
    int *A_buf = (int *)malloc(sizeof(int) * n * n);
    int **A = (int **)malloc(sizeof(int *) * n);
    for (i = 0; i < n; i++) {
      A[i] = A_buf + (i * n);
      for (j = 0; j < n; j++) {
        // Assign random values.
        A[i][j] = i * n + j;
      }
    }
    // A[i][j] is the root task.
    for (i = 0; i < n; i++) {
      for (j = 0; j < n; j++) {
        if (i == 0 && j == 0) {
          // Root task: no input dependences.
#pragma omp task depend(out:A[i][j]) firstprivate(A, i, j) untied
          {
            TASK_UNTIED_CHECK(i * n + j);
            A[i][j] = 1;
          }
        } else if (i == 0) {
          // First row: depends only on the cell to the left.
#pragma omp task depend(in:A[i][j - 1]) depend(out:A[i][j]) \
    firstprivate(A, i, j) untied
          {
            TASK_UNTIED_CHECK(i * n + j);
            A[i][j] = A[i][j - 1];
          }
        } else if (j == 0) {
          // First column: depends only on the cell above.
#pragma omp task depend(in:A[i - 1][j]) depend(out:A[i][j]) \
    firstprivate(A, i, j) untied
          {
            TASK_UNTIED_CHECK(i * n + j);
            A[i][j] = A[i - 1][j];
          }
        } else {
          // Interior cell: depends on both neighbours.
#pragma omp task depend(in:A[i - 1][j], A[i][j - 1]) \
    depend(out:A[i][j]) untied
          {
            TASK_UNTIED_CHECK(i * n + j);
            A[i][j] = A[i - 1][j] + A[i][j - 1];
          }
        }
      }
    }
    // Wait for the whole lattice before reading the result.
#pragma omp taskwait
    task_val = A[n - 1][n - 1];
    free(A);
    free(A_buf);
  }
  seq_val = calc_seq(n);
  if (seq_val != task_val) {
    printf("[%d] Failed: route(%d) = %d (ANS = %d)\n", num_threads, n,
           task_val, seq_val);
    return 0;
  }
  int index;
  for (index = 0; index < n * n; index++) {
    // Each task must have scored both checks (+1 and +2).
    if (vals[index] != 3) {
      printf("vals[%d] == %d\n", index, vals[index]);
      return 0;
    }
  }
  return 1;
}

// Runs the test for team sizes 1..REPETITIONS; the process exit code is
// the number of failing configurations (0 == all passed).
int main() {
  int i;
  int num_failed = 0;
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_taskdep_untied_threadid2(i + 1)) {
      num_failed++;
    }
  }
  return num_failed;
}
GB_binop__times_fc64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// Each function body below is produced by textually including a shared
// template (GB_*_template.c / GB_*_meta.c) that expands in terms of the
// GB_* macros defined here for the TIMES operator on GxB_FC64_t.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__times_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_01__times_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_02__times_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_03__times_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__times_fc64)
// A*D function (colscale):         GB (_AxD__times_fc64)
// D*A function (rowscale):         GB (_DxB__times_fc64)
// C+=B function (dense accum):     GB (_Cdense_accumB__times_fc64)
// C+=b function (dense accum):     GB (_Cdense_accumb__times_fc64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__times_fc64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__times_fc64)
// C=scalar+B                       GB (_bind1st__times_fc64)
// C=scalar+B'                      GB (_bind1st_tran__times_fc64)
// C=A+scalar                       GB (_bind2nd__times_fc64)
// C=A'+scalar                      GB (_bind2nd_tran__times_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_mul (aij, bij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    GxB_FC64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_FC64_mul (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_FC64 || GxB_NO_TIMES_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__times_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__times_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__times_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__times_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable; generated boilerplate kept as-is.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__times_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__times_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__times_fc64)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__times_fc64)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__times_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__times_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__times_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__times_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t   x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        GxB_FC64_t bij = Bx [p] ;
        Cx [p] = GB_FC64_mul (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__times_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t   y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = Ax [p] ;
        Cx [p] = GB_FC64_mul (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC64_mul (x, aij) ;            \
}

GrB_Info GB (_bind1st_tran__times_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC64_mul (aij, y) ;            \
}

GrB_Info GB (_bind2nd_tran__times_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cpd_omp.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <HiParTI.h>
#include <assert.h>
#include <math.h>
#ifdef HIPARTI_USE_MAGMA
#include "magma_v2.h"
#include "magma_lapack.h"
#else
#include "clapack.h"
#endif
#include "hicoo.h"

#ifdef HIPARTI_USE_OPENMP

/**
 * Run the CPD-ALS iteration loop on a HiCOO sparse tensor with OpenMP.
 *
 * @param hitsr      input sparse tensor in HiCOO format
 * @param rank       CP rank (number of columns of every factor matrix)
 * @param niters     maximum number of ALS iterations
 * @param tol        convergence tolerance on the change of the fit
 * @param tk, tb     thread counts for kernels / blocks
 * @param par_iters  per-mode flag: 1 = use the privatized-reduce MTTKRP
 * @param mats       nmodes factor matrices plus a scratch matrix at mats[nmodes]
 * @param copy_mats  per-thread factor copies, only used for modes with par_iters[m]==1
 * @param lambda     output column norms of the factors
 * @param balanced   load-balancing flag forwarded to the MTTKRP kernels
 * @return the final fit value
 */
double OmpCpdAlsStepHiCOO(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiIndex const rank,
    ptiIndex const niters,
    double const tol,
    const int tk,
    const int tb,
    const int * par_iters,
    ptiRankMatrix ** mats,
    ptiRankMatrix *** copy_mats,
    ptiValue * const lambda,
    int balanced)
{
    ptiIndex const nmodes = hitsr->nmodes;
    ptiIndex const stride = mats[0]->stride;
    double fit = 0;

    omp_set_num_threads(tk);
#ifdef HIPARTI_USE_MAGMA
    magma_set_omp_numthreads(tk);
    magma_set_lapack_numthreads(tk);
    // printf("magma nthreads: %d\n", magma_get_parallel_numthreads());
    // printf("magma nthreads: %d\n", magma_get_omp_numthreads());
    // printf("magma lapack nthreads: %d\n", magma_get_lapack_numthreads());
#endif

    // ptiAssert(stride == rank); // for correct column-major magma functions
    for(ptiIndex m=0; m < nmodes; ++m) {
        ptiAssert(hitsr->ndims[m] == mats[m]->nrows);
        ptiAssert(mats[m]->ncols == rank);
    }

    // NOTE(review): ssyrk_ is the single-precision BLAS routine; this assumes
    // ptiValue is float — confirm against HiParTI.h.
    ptiValue alpha = 1.0, beta = 0.0;
    char notrans = 'N';
    // char trans = 'T';
    char uplo = 'L';
    int blas_rank = (int) rank;
    int blas_stride = (int) stride;

    ptiRankMatrix * tmp_mat = mats[nmodes];
    ptiRankMatrix ** ata = (ptiRankMatrix **)malloc((nmodes+1) * sizeof(*ata));
    for(ptiIndex m=0; m < nmodes+1; ++m) {
        ata[m] = (ptiRankMatrix *)malloc(sizeof(ptiRankMatrix));
        ptiAssert(ptiNewRankMatrix(ata[m], rank, rank) == 0);
        ptiAssert(mats[m]->stride == ata[m]->stride);
    }

    /* Compute all "ata"s */
    for(ptiIndex m=0; m < nmodes; ++m) {
        /* ata[m] = mats[m]^T * mats[m]), actually do A * A' due to row-major mats, and output an upper triangular matrix. */
        int blas_nrows = (int)(mats[m]->nrows);
        ssyrk_(&uplo, &notrans, &blas_rank, &blas_nrows, &alpha,
            mats[m]->values, &blas_stride, &beta, ata[m]->values, &blas_stride);
    }
    // printf("Initial mats:\n");
    // for(size_t m=0; m < nmodes+1; ++m)
    //     ptiDumpRankMatrix(mats[m], stdout);
    // printf("Initial ata:\n");
    // for(ptiIndex m=0; m < nmodes+1; ++m)
    //     ptiDumpRankMatrix(ata[m], stdout);

    double oldfit = 0;
    ptiIndex * mats_order = (ptiIndex*)malloc(nmodes * sizeof(*mats_order));

    ptiTimer tmp_timer;
    ptiNewTimer(&tmp_timer, 0);
    /* solver_time/norm_time/ata_time/fit_time are kept for the commented-out
       timing prints below. */
    double mttkrp_time, solver_time, norm_time, ata_time, fit_time;
    // double sum_time = 0.0;

    for(ptiIndex it=0; it < niters; ++it) {
        // printf(" its = %3lu\n", it+1);
        // sum_time = 0.0;
        ptiTimer timer;
        ptiNewTimer(&timer, 0);
        ptiStartTimer(timer);

        for(ptiIndex m=0; m < nmodes; ++m) {
            // printf("\nmode %u \n", m);
            tmp_mat->nrows = mats[m]->nrows;

            /* Factor Matrices order: current mode first, then the rest cyclically. */
            mats_order[0] = m;
            for(ptiIndex i=1; i<nmodes; ++i)
                mats_order[i] = (m+i) % nmodes;

            // ptiAssert (ptiOmpMTTKRPHiCOO_MatrixTiling(hitsr, mats, mats_order, m) == 0);
            ptiStartTimer(tmp_timer);
            if(par_iters[m] == 1) {
                ptiAssert (ptiOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats[m], mats_order, m, tk, tb, balanced) == 0);
            } else {
                ptiAssert (ptiOmpMTTKRPHiCOO_MatrixTiling_Scheduled(hitsr, mats, mats_order, m, tk, tb, balanced) == 0);
            }
            ptiStopTimer(tmp_timer);
            mttkrp_time = ptiPrintElapsedTime(tmp_timer, "MTTKRP");
            // printf("ptiMTTKRPHiCOO_MatrixTiling mats[nmodes]:\n");
            // ptiDumpRankMatrix(mats[nmodes], stdout);

            ptiStartTimer(tmp_timer);
            /* Copy the MTTKRP result (held in the scratch matrix) into mats[m]. */
#ifdef HIPARTI_USE_OPENMP
            #pragma omp parallel for num_threads(tk)
#endif
            for(ptiIndex i=0; i<mats[m]->nrows * stride; ++i)
                mats[m]->values[i] = tmp_mat->values[i];

            /* Solve ? * ata[nmodes] = mats[nmodes] (tmp_mat) */
            /* result is row-major, solve AT XT = BT */
            ptiAssert ( ptiRankMatrixSolveNormals(m, nmodes, ata, mats[m]) == 0 );
            ptiStopTimer(tmp_timer);
            // solver_time = ptiPrintElapsedTime(tmp_timer, "memcpy and ptiRankMatrixSolveNormals");
            // printf("Inverse mats[m]:\n");
            // ptiDumpRankMatrix(mats[m], stdout);

            /* Normalized mats[m], store the norms in lambda. Use different norms to avoid precision explosion. */
            ptiStartTimer(tmp_timer);
            if (it == 0 ) {
                ptiRankMatrix2Norm(mats[m], lambda);
            } else {
                ptiRankMatrixMaxNorm(mats[m], lambda);
            }
            ptiStopTimer(tmp_timer);
            // norm_time = ptiPrintElapsedTime(tmp_timer, "matrix norm");
            // printf("Normalize mats[m]:\n");
            // ptiDumpRankMatrix(mats[m], stdout);
            // printf("lambda:\n");
            // for(size_t i=0; i<rank; ++i)
            //     printf("%lf ", lambda[i]);
            // printf("\n\n");

            /* ata[m] = mats[m]^T * mats[m]) */
            ptiStartTimer(tmp_timer);
            int blas_nrows = (int)(mats[m]->nrows);
            ssyrk_(&uplo, &notrans, &blas_rank, &blas_nrows, &alpha,
                mats[m]->values, &blas_stride, &beta, ata[m]->values, &blas_stride);
            ptiStopTimer(tmp_timer);
            // ata_time = ptiPrintElapsedTime(tmp_timer, "update ata");
            // printf("Update ata[m]:\n");
            // ptiDumpRankMatrix(ata[m], stdout);

            // sum_time += mttkrp_time + norm_time + ata_time;
        } // Loop nmodes

        // PrintDenseValueVector(lambda, rank, "lambda", "debug.txt");
        ptiStartTimer(tmp_timer);
        fit = KruskalTensorFitHiCOO(hitsr, lambda, mats, ata);
        ptiStopTimer(tmp_timer);
        // fit_time = ptiPrintElapsedTime(tmp_timer, "KruskalTensorFitHiCOO");

        ptiStopTimer(timer);
        double its_time = ptiElapsedTime(timer);
        ptiFreeTimer(timer);

        printf(" its = %3u ( %.3lf s ) fit = %0.5f delta = %+0.4e\n",
            it+1, its_time, fit, fit - oldfit);
        if(it > 0 && fabs(fit - oldfit) < tol) {
            break;
        }
        oldfit = fit;
    } // Loop niters

    GetRankFinalLambda(rank, nmodes, mats, lambda);

    for(ptiIndex m=0; m < nmodes+1; ++m) {
        ptiFreeRankMatrix(ata[m]);
        /* FIX: ptiFreeRankMatrix releases only the matrix internals; the
           malloc'd struct itself was leaked before. */
        free(ata[m]);
    }
    free(ata);
    free(mats_order);
    /* FIX: tmp_timer was never released (timer objects are freed everywhere
       else with ptiFreeTimer). */
    ptiFreeTimer(tmp_timer);

    return fit;
}

/**
 * Driver for OpenMP CPD-ALS on a HiCOO tensor: allocates and randomizes the
 * factor matrices, decides per-mode which MTTKRP variant to use, runs the ALS
 * loop, and stores the factors and fit into ktensor.
 *
 * @return 0 on success
 */
int ptiOmpCpdAlsHiCOO(
    ptiSparseTensorHiCOO const * const hitsr,
    ptiIndex const rank,
    ptiIndex const niters,
    double const tol,
    const int tk,
    const int tb,
    int balanced,
    ptiRankKruskalTensor * ktensor)
{
    ptiIndex nmodes = hitsr->nmodes;
#ifdef HIPARTI_USE_MAGMA
    magma_init();
#endif

    /* Initialize factor matrices; the extra matrix at mats[nmodes] is scratch
       space sized for the largest mode. */
    ptiIndex max_dim = 0;
    for(ptiIndex m=0; m < nmodes; ++m) {
        max_dim = (hitsr->ndims[m] > max_dim) ? hitsr->ndims[m] : max_dim;
    }
    ptiRankMatrix ** mats = (ptiRankMatrix **)malloc((nmodes+1) * sizeof(*mats));
    for(ptiIndex m=0; m < nmodes+1; ++m) {
        mats[m] = (ptiRankMatrix *)malloc(sizeof(ptiRankMatrix));
    }
    for(ptiIndex m=0; m < nmodes; ++m) {
        ptiAssert(ptiNewRankMatrix(mats[m], hitsr->ndims[m], rank) == 0);
        // assert(ptiConstantRankMatrix(mats[m], 1) == 0);
        ptiAssert(ptiRandomizeRankMatrix(mats[m], hitsr->ndims[m], rank) == 0);
    }
    ptiAssert(ptiNewRankMatrix(mats[nmodes], max_dim, rank) == 0);
    ptiAssert(ptiConstantRankMatrix(mats[nmodes], 0) == 0);

    /* determine niters or num_kernel_dim to be parallelized */
    int * par_iters = (int *)malloc(nmodes * sizeof(*par_iters));
    ptiIndex sk = (ptiIndex)pow(2, hitsr->sk_bits);
    for(ptiIndex m=0; m < nmodes; ++m) {
        par_iters[m] = 0;
        ptiIndex num_kernel_dim = (hitsr->ndims[m] + sk - 1) / sk;
        // printf("num_kernel_dim: %u, hitsr->nkiters[m] / num_kernel_dim: %u\n", num_kernel_dim, hitsr->nkiters[m]/num_kernel_dim);
        if(num_kernel_dim <= PAR_MIN_DEGREE * NUM_CORES && hitsr->nkiters[m] / num_kernel_dim >= PAR_DEGREE_REDUCE) {
            par_iters[m] = 1;
        }
    }
    printf("par_iters:\n");
    for(ptiIndex m=0; m < nmodes; ++m) {
        printf("%d, ", par_iters[m]);
    }
    printf("\n");

    /* Per-thread factor copies, only for modes using the reduce variant. */
    ptiRankMatrix *** copy_mats = (ptiRankMatrix ***)malloc(nmodes * sizeof(*copy_mats));
    for(ptiIndex m=0; m < nmodes; ++m) {
        if (par_iters[m] == 1) {
            copy_mats[m] = (ptiRankMatrix **)malloc(tk * sizeof(ptiRankMatrix*));
            for(int t=0; t<tk; ++t) {
                copy_mats[m][t] = (ptiRankMatrix *)malloc(sizeof(ptiRankMatrix));
                ptiAssert(ptiNewRankMatrix(copy_mats[m][t], hitsr->ndims[m], rank) == 0);
                ptiAssert(ptiConstantRankMatrix(copy_mats[m][t], 0) == 0);
            }
        }
    }

    ptiTimer timer;
    ptiNewTimer(&timer, 0);
    ptiStartTimer(timer);

    ktensor->fit = OmpCpdAlsStepHiCOO(hitsr, rank, niters, tol, tk, tb, par_iters, mats, copy_mats, ktensor->lambda, balanced);

    ptiStopTimer(timer);
    ptiPrintElapsedTime(timer, "CPU HiCOO SpTns CPD-ALS");
    ptiFreeTimer(timer);

    /* The factor matrices are handed over to the output tensor; do not free
       mats[0..nmodes-1] here. */
    ktensor->factors = mats;

#ifdef HIPARTI_USE_MAGMA
    magma_finalize();
#endif
    ptiFreeRankMatrix(mats[nmodes]);
    for(ptiIndex m=0; m < nmodes; ++m) {
        if(par_iters[m] == 1) {
            for(int t=0; t<tk; ++t) {
                ptiFreeRankMatrix(copy_mats[m][t]);
                free(copy_mats[m][t]);
            }
            free(copy_mats[m]);
        }
    }
    free(copy_mats);
    /* FIX: par_iters was leaked before. */
    free(par_iters);

    return 0;
}

#endif
jacobi-1d-imper.limlam.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>

/* Problem size: N grid points, T time steps. */
#define N 2000000
#define T 1000

#pragma declarations
double a[N];
double b[N];
#pragma enddeclarations

#ifdef PERFCTR
#include <papi.h>
#include "papi_defs.h"
#endif

#include "util.h"

#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/* S1: 3-point Jacobi stencil update (b from a); S2: copy-back (a from b).
   The first two macro arguments are tile coordinates and are unused in the
   statement bodies. */
#define S1(zT0,zT1,t,i) {b[i]=((double)(333))/1000*(a[1+i]+a[i]+a[i-1]);}
#define S2(zT0,zT1,t,j) {a[j]=b[j];}

/* Time-tiled, parallelized 1-D Jacobi.  The loop nest below is machine
   generated (CLooG); the c1/c2 loops enumerate tiles, the inner c3/c4 loops
   enumerate iterations within a tile.  Do not hand-edit the generated bounds. */
int main()
{
    assert(N >= 1000);
    assert(T >= 1000);

    int c1, c2, c3, c4, c5;
    int i, j, k, l, t;
    register int lb, ub;

#ifdef TEST
    init_array() ;
#endif

#ifdef PERFCTR
    PERF_INIT;
#endif

    /* Generated from jacobi-imper.par.cloog by CLooG v0.14.1 64 bits in 0.11s. */
    /* Outer loop over tile wavefronts; tiles within a wavefront (c2) are
       independent and run in parallel below. */
    for (c1=-1;c1<=floord(2*N+5*T-8,2048);c1++) {
        lb = max(max(max(ceild(6144*c1-N-4092,10240),ceild(2048*c1-2047,4096)),0),ceild(2048*c1-N-2*T+4,2048));
        ub = min(min(min(floord(2048*c1+2045,2048),floord(2048*c1+T+2047,4096)),floord(N+3*T-4,2048)),floord(6144*c1+6141,10240));
#pragma omp parallel for shared(c1,lb,ub,a,b) private(i,j,k,l,c2,c3,c4,c5) default(none) schedule(static)
        for (c2=lb;c2<=ub;c2++) {
            if ((c1 <= floord(4096*c2-T,2048)) && (c2 >= ceild(3*T,2048))) {
                c3 = 2048*c2-T ;
                c4 = 2048*c2 ;
                c5 = 1 ;
                i = -c1+2*c2 ;
                j = 3*c1-5*c2 ;
                k = T-1 ;
                l = 2048*c2-3*T+2 ;
                S2(-c1+2*c2,3*c1-5*c2,T-1,2048*c2-3*T+2) ;
            }
            for (c3=max(max(ceild(4096*c2,3),2),2048*c1-2048*c2);c3<=min(min(2048*c1-2048*c2+2047,floord(4096*c2+1,3)),2*T-1);c3++) {
                for (c4=max(2048*c2,c3+1);c4<=floord(3*c3,2);c4++) {
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(2048*c2,2),2048*c1-2048*c2);c3<=min(min(3,2048*c1-2048*c2+2047),2048*c2+2046);c3++) {
                c4 = c3 ;
                c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = 0 ; l = c3 ;
                S1(-c1+2*c2,3*c1-5*c2,0,c3) ;
                for (c4=c3+1;c4<=min(floord(3*c3,2),2048*c2+2047);c4++) {
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(ceild(4096*c2+2,3),2048*c2-T+1),2048*c1-2048*c2);c3<=min(min(min(min(2048*c2-1,floord(4096*c2+N-4,3)),2048*c1-2048*c2+2047),floord(4096*c2+4095,3)),2*T+1);c3++) {
                for (c4=2048*c2;c4<=min(c3+T-1,floord(3*c3-2,2));c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
                for (c4=ceild(3*c3-1,2);c4<=min(min(c3+T,floord(3*c3,2)),2048*c2+2047);c4++) {
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(2048*c2,4),2048*c1-2048*c2);c3<=min(min(min(N-2,2048*c1-2048*c2+2047),floord(4096*c2+4095,3)),2*T+1);c3++) {
                c4 = c3 ;
                c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = 0 ; l = c3 ;
                S1(-c1+2*c2,3*c1-5*c2,0,c3) ;
                for (c4=c3+1;c4<=min(c3+T-1,floord(3*c3-2,2));c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
                for (c4=ceild(3*c3-1,2);c4<=min(min(c3+T,floord(3*c3,2)),2048*c2+2047);c4++) {
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(2*T+2,2048*c2-T+1),2048*c1-2048*c2);c3<=min(min(min(floord(4096*c2+N-4,3),2048*c2-1),2048*c1-2048*c2+2047),2048*c2-T+2047);c3++) {
                for (c4=2048*c2;c4<=c3+T-1;c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
                c4 = c3+T ;
                c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = T-1 ; l = c3-2*T+2 ;
                S2(-c1+2*c2,3*c1-5*c2,T-1,c3-2*T+2) ;
            }
            for (c3=max(2048*c1-2048*c2,ceild(4096*c2+4096,3));c3<=min(min(min(floord(4096*c2+N-4,3),2048*c2-1),2048*c1-2048*c2+2047),2*T+1);c3++) {
                for (c4=2048*c2;c4<=min(2048*c2+2047,c3+T-1);c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(2048*c2,2*T+2),2048*c1-2048*c2);c3<=min(min(N-2,2048*c1-2048*c2+2047),2048*c2-T+2047);c3++) {
                c4 = c3 ;
                c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = 0 ; l = c3 ;
                S1(-c1+2*c2,3*c1-5*c2,0,c3) ;
                for (c4=c3+1;c4<=c3+T-1;c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
                c4 = c3+T ;
                c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = T-1 ; l = c3-2*T+2 ;
                S2(-c1+2*c2,3*c1-5*c2,T-1,c3-2*T+2) ;
            }
            for (c3=max(max(2048*c2,2048*c1-2048*c2),ceild(4096*c2+4096,3));c3<=min(min(min(N-2,2048*c1-2048*c2+2047),2048*c2+2046),2*T+1);c3++) {
                c4 = c3 ;
                c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = 0 ; l = c3 ;
                S1(-c1+2*c2,3*c1-5*c2,0,c3) ;
                for (c4=c3+1;c4<=min(2048*c2+2047,c3+T-1);c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(ceild(4096*c2+N-3,3),N-1),2048*c1-2048*c2);c3<=min(min(2048*c1-2048*c2+2047,floord(4096*c2+4095,3)),2*T+1);c3++) {
                for (c4=max(2048*c2,ceild(3*c3-N+2,2));c4<=floord(3*c3-N+3,2);c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                }
                for (c4=ceild(3*c3-N+4,2);c4<=min(c3+T-1,floord(3*c3-2,2));c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
                for (c4=ceild(3*c3-1,2);c4<=min(min(c3+T,floord(3*c3,2)),2048*c2+2047);c4++) {
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(2048*c1-2048*c2,2*T+2),2048*c2-T+2048);c3<=min(min(floord(4096*c2+N-4,3),2048*c2-1),2048*c1-2048*c2+2047);c3++) {
                for (c4=2048*c2;c4<=2048*c2+2047;c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(max(2048*c2,2048*c1-2048*c2),2*T+2),2048*c2-T+2048);c3<=min(min(N-2,2048*c1-2048*c2+2047),2048*c2+2046);c3++) {
                c4 = c3 ;
                c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = 0 ; l = c3 ;
                S1(-c1+2*c2,3*c1-5*c2,0,c3) ;
                for (c4=c3+1;c4<=2048*c2+2047;c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(max(ceild(4096*c2+N-3,3),N-1),2*T+2),2048*c1-2048*c2);c3<=min(min(N+2*T-6,2048*c1-2048*c2+2047),2048*c2-T+2047);c3++) {
                for (c4=max(2048*c2,ceild(3*c3-N+2,2));c4<=floord(3*c3-N+3,2);c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                }
                for (c4=ceild(3*c3-N+4,2);c4<=c3+T-1;c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
                c4 = c3+T ;
                c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = T-1 ; l = c3-2*T+2 ;
                S2(-c1+2*c2,3*c1-5*c2,T-1,c3-2*T+2) ;
            }
            for (c3=max(max(max(ceild(4096*c2+N-3,3),2048*c1-2048*c2),N-1),ceild(4096*c2+4096,3));c3<=min(min(2048*c1-2048*c2+2047,floord(4096*c2+N+4090,3)),2*T+1);c3++) {
                for (c4=max(2048*c2,ceild(3*c3-N+2,2));c4<=floord(3*c3-N+3,2);c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                }
                for (c4=ceild(3*c3-N+4,2);c4<=min(2048*c2+2047,c3+T-1);c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(max(max(ceild(4096*c2+N-3,3),2048*c1-2048*c2),N-1),2*T+2),2048*c2-T+2048);c3<=min(2048*c1-2048*c2+2047,floord(4096*c2+N+4090,3));c3++) {
                for (c4=max(2048*c2,ceild(3*c3-N+2,2));c4<=floord(3*c3-N+3,2);c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                }
                for (c4=ceild(3*c3-N+4,2);c4<=2048*c2+2047;c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                    c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4-1 ; l = 3*c3-2*c4+2 ;
                    S2(-c1+2*c2,3*c1-5*c2,-c3+c4-1,3*c3-2*c4+2) ;
                }
            }
            for (c3=max(max(N+2*T-5,2048*c2-T+1),2048*c1-2048*c2);c3<=min(min(2048*c1-2048*c2+2047,2048*c2-T+2047),N+2*T-4);c3++) {
                for (c4=max(2048*c2,ceild(3*c3-N+2,2));c4<=c3+T-1;c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                }
                c4 = c3+T ;
                c5 = 1 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = T-1 ; l = c3-2*T+2 ;
                S2(-c1+2*c2,3*c1-5*c2,T-1,c3-2*T+2) ;
            }
            if ((c1 >= 2*c2) && (c2 <= floord(N-2049,2048))) {
                c3 = 2048*c2+2047 ;
                c4 = 2048*c2+2047 ;
                c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = 0 ; l = 2048*c2+2047 ;
                S1(-c1+2*c2,3*c1-5*c2,0,2048*c2+2047) ;
            }
            for (c3=max(max(ceild(4096*c2+N+4091,3),2048*c1-2048*c2),N-1);c3<=min(min(2048*c1-2048*c2+2047,floord(4096*c2+N+4092,3)),N+2*T-4);c3++) {
                for (c4=ceild(3*c3-N+2,2);c4<=min(2048*c2+2047,c3+T-1);c4++) {
                    c5 = 0 ; i = -c1+2*c2 ; j = 3*c1-5*c2 ; k = -c3+c4 ; l = 3*c3-2*c4 ;
                    S1(-c1+2*c2,3*c1-5*c2,-c3+c4,3*c3-2*c4) ;
                }
            }
        }
    }

#ifdef PERFCTR
    PERF_EXIT;
#endif

#ifdef TEST
    print_array();
#endif
    return 0;
}
detector.c
#include "darknet.h"

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdint.h>   /* FIX: intptr_t for the socket-fd <-> void* round trip */

int BUFFER_SIZE = 256;
int USER_PORT = 9001;
int HYPER_PORT = 10001;
//int QUATO = 100;
#define SERVER_ADDR "127.0.0.1"

static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};

/* Train a detection network, optionally across multiple GPUs, periodically
   saving backup/checkpoint weights to the configured backup directory. */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");

    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    /* FIX: this is an array of network POINTERS; sizeof(network) over-allocated. */
    network **nets = calloc(ngpus, sizeof(network*));

    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];

    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    data train, buffer;

    layer l = net->layers[net->n - 1];

    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;

    pthread_t load_thread = load_data(args);
    /* NOTE: `time` shadows time(); all time(0) calls are above this point. */
    double time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            /* The batch loaded at the old size is discarded. */
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            #pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        /*
        int k;
        for(k = 0; k < l.max_boxes; ++k){
        box b = float_to_box(train.y.vals[10] + 1 + k*5);
        if(!b.x) break;
        printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
        }
        */
        /*
        int zz;
        for(zz = 0; zz < train.X.cols; ++zz){
        image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
        int k;
        for(k = 0; k < l.max_boxes; ++k){
        box b = float_to_box(train.y.vals[zz] + k*5, 1);
        printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
        draw_bbox(im, b, 1, 1,0,0);
        }
        show_image(im, "truth11");
        cvWaitKey(0);
        save_image(im, "truth11");
        }
        */

        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);

        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}

/* Extract the numeric COCO image id from the trailing component of a path. */
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    return atoi(p+1);
}

/* Append detections for one image to a COCO-format JSON results file. */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int i, j;
    int image_id = get_coco_image_id(image_path);
    for(i = 0; i < num_boxes; ++i){
        /* Convert center/size boxes to corner coordinates, clamped to the image. */
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
        }
    }
}

/* Write VOC-style per-class detection lines (one results file per class). */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        /* VOC uses 1-based pixel coordinates. */
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;

        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j], xmin, ymin, xmax, ymax);
        }
    }
}

/* Write ImageNet-detection-format lines (1-based class ids). */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            int class = j;
            if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class], xmin, ymin, xmax, ymax);
        }
    }
}

/* Validate a detector with horizontal-flip test-time augmentation: each image
   and its mirror are batched together (batch size 2) before prediction. */
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 2);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    image input = make_image(net->w, net->h, net->c*2);

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    /* Pipelined loading: join the previous batch of loader threads, kick off
       the next batch, then run inference on the joined images. */
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);

            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        /* Back over the trailing ",\n" to close the JSON array cleanly. */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

/* Validate a detector over a list of images, writing results in COCO,
   ImageNet, or VOC format depending on the "eval" option. */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

/* Measure proposal recall / average IOU against the COCO 5k validation list. */
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];

    int j, k;

    int m = plist->size;
    int i=0;

    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;

    int total = 0;
    int correct = 0;
    int proposals = 0;
    float avg_iou = 0;

    for(i = 0; i < m; ++i){
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        if (nms) do_nms_obj(dets, nboxes, 1, nms);

        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);

        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            /* FIX: dets holds nboxes entries; indexing up to l.w*l.h*l.n read
               past the end of the array. */
            for(k = 0; k < nboxes; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }

        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        /* FIX: detections and labels were leaked on every image. */
        free_detections(dets, nboxes);
        free(truth);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}

/* Create a TCP socket bound to dest_addr:dest_port and put it in listening
   mode.  (Despite the name, this is the server/listen side, not a connect.)
   Returns the listening socket fd. */
int socket_connection_function(char* dest_addr, int dest_port)
{
    printf("entering the socket connection ...\n");
    // socket connection part
    int sockfd = socket(AF_INET,SOCK_STREAM, 0);//socket setting
    struct sockaddr_in servaddr;
    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(dest_port);
    servaddr.sin_addr.s_addr = inet_addr(dest_addr);

    if (bind(sockfd, (struct sockaddr *) &servaddr,sizeof(servaddr)) < 0)
        error("ERROR on binding");
    listen(sockfd,5);
    // if (connect(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr)) < 0){perror("connect");}
    printf("listening from controller socket connected!\n");
    return sockfd;
}

/* pthread start routine: receive quota updates from one controller connection.
   FIX: pthread start routines must have the signature void *(*)(void *); the
   previous void(...) declaration was passed to pthread_create through an
   incompatible function-pointer type (undefined behavior). */
void *recv_control_info_in_thread(void * arg)
{
    /* FIX: round-trip the fd through intptr_t; casting void* directly to int
       truncates on LP64 platforms. */
    int sockfd = (int)(intptr_t)arg;
    double recv_data[BUFFER_SIZE];
    while(1)
    {
        if (recv(sockfd,recv_data, sizeof(recv_data),MSG_WAITALL)<=0) break;
        /* NOTE(review): QUATO's local definition above is commented out;
           presumably it is declared in darknet.h — verify. */
        QUATO = recv_data[0];
        printf("data received! quato is %d\n",QUATO);
    }
    return NULL;
}

/* pthread start routine: accept controller connections forever, spawning one
   receive thread per connection.  Same signature FIX as above. */
void *main_controller_in_thread(void *arg)
{
    int sockfd = socket_connection_function(SERVER_ADDR, HYPER_PORT);
    while(1)
    {
        struct sockaddr_in client_addr;
        socklen_t length = sizeof(client_addr);
        int newsockfd = accept(sockfd, (struct sockaddr *) &client_addr, &length);
        pthread_t recv_thread;
        if(pthread_create(&recv_thread, NULL, recv_control_info_in_thread, (void *)(intptr_t)newsockfd) ==-1)
            perror("create hypervisor thread error!!\n");
        else
            pthread_detach(recv_thread);  /* FIX: never joined; detach to avoid leaking thread handles */
        printf("new controller connected!\n");
    }
    return NULL;
}

/* Interactive single-image detection with a background controller thread.
   NOTE(review): this function is truncated at the end of this chunk; the
   remainder of its body follows in the file. */
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    pthread_t controller_thread;
    if(pthread_create(&controller_thread, NULL, main_controller_in_thread, NULL)==-1)
        perror("create hypervisor thread error!!\n");

    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);

    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
            input[255] = '\0';  /* FIX: strncpy does not terminate when filename >= 256 chars */
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
strtok(input, "\n"); } image im = load_image_color(input,0,0); image sized = letterbox_image(im, net->w, net->h); //image sized = resize_image(im, net->w, net->h); //image sized2 = resize_max(im, net->w); //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h); //resize_network(net, sized.w, sized.h); layer l = net->layers[net->n-1]; float *X = sized.data; time=what_time_is_it_now(); network_predict(net, X); printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time); int nboxes = 0; detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); //printf("%d\n", nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes); free_detections(dets, nboxes); if(outfile){ save_image(im, outfile); } else{ save_image(im, "predictions"); #ifdef OPENCV //make_window("predictions", 512, 512, 0); //show_image(im, "predictions", 0); #endif } free_image(im); free_image(sized); //if (filename) break; } } /* void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; float nms = .45; while(1){ image in = get_image_from_stream(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = 
net->layers[net->n-1]; float *X = in_s.data; network_predict(net, X); int nboxes = 0; detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int left = b.x-b.w/2.; int top = b.y-b.h/2.; censor_image(in, left, top, b.w, b.h); } } show_image(in, base); cvWaitKey(10); free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream(cap); free_image(in); } } #endif } void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; int count = 0; float nms = .45; while(1){ image in = get_image_from_stream(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = net->layers[net->n-1]; show_image(in, base); int nboxes = 0; float *X = in_s.data; network_predict(net, X); detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int size = b.w*in.w > b.h*in.h 
? b.w*in.w : b.h*in.h; int dx = b.x*in.w-size/2.; int dy = b.y*in.h-size/2.; image bim = crop_image(in, dx, dy, size, size); char buff[2048]; sprintf(buff, "results/extract/%07d", count); ++count; save_image(bim, buff); free_image(bim); } } free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream(cap); free_image(in); } } #endif } */ /* void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets) { network_predict_image(net, im); layer l = net->layers[net->n-1]; int nboxes = num_boxes(net); fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); } */ void run_detector(int argc, char **argv) { char *prefix = find_char_arg(argc, argv, "-prefix", 0); float thresh = find_float_arg(argc, argv, "-thresh", .5); float hier_thresh = find_float_arg(argc, argv, "-hier", .5); int cam_index = find_int_arg(argc, argv, "-c", 0); int frame_skip = find_int_arg(argc, argv, "-s", 0); int avg = find_int_arg(argc, argv, "-avg", 3); if(argc < 4){ fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); return; } char *gpu_list = find_char_arg(argc, argv, "-gpus", 0); char *outfile = find_char_arg(argc, argv, "-out", 0); int *gpus = 0; int gpu = 0; int ngpus = 0; if(gpu_list){ printf("%s\n", gpu_list); int len = strlen(gpu_list); ngpus = 1; int i; for(i = 0; i < len; ++i){ if (gpu_list[i] == ',') ++ngpus; } gpus = calloc(ngpus, sizeof(int)); for(i = 0; i < ngpus; ++i){ gpus[i] = atoi(gpu_list); gpu_list = strchr(gpu_list, ',')+1; } } else { gpu = gpu_index; gpus = &gpu; ngpus = 1; } int clear = find_arg(argc, argv, "-clear"); int fullscreen = find_arg(argc, argv, "-fullscreen"); int width = find_int_arg(argc, argv, "-w", 0); int height = find_int_arg(argc, argv, "-h", 0); int fps = find_int_arg(argc, argv, "-fps", 0); //int class = 
find_int_arg(argc, argv, "-class", 0); char *datacfg = argv[3]; char *cfg = argv[4]; char *weights = (argc > 5) ? argv[5] : 0; char *filename = (argc > 6) ? argv[6]: 0; if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen); else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear); else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights); else if(0==strcmp(argv[2], "demo")) { list *options = read_data_cfg(datacfg); int classes = option_find_int(options, "classes", 20); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen); } //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); }
tensor.h
#ifndef TENSOR_H #define TENSOR_H #include "common.h" #include "dim.h" namespace tpm { class Tensor { public: enum DataType { Float32, Int32, }; enum TensorType { Input, Weight, Invalid, NotCounted, }; // TODO: is more compute state needed? enum ComputeState { NotComputed, // Allocated, // Initialized, // ComputedPartial, ComputedFull, }; private: const size_t guid; uint64_t hash; Dim dims; OpVec inputOf; Operator *outputOf; VType *data; Dim it; DataType dtype; TensorType type; ComputeState computed; static int random_seed[256 * 16]; static bool random_inited; // splitting points [dim][n-th splitting point] std::vector<std::vector<int>> splittingPoints; Dim dimPenalty; public: Tensor(TensorType type = Input, DataType dtype = Float32) : guid(generateGuid()), hash(generateHash()), outputOf(nullptr), data(nullptr), dtype(dtype), type(type), computed(NotComputed) {} Tensor(const Dim &dims, TensorType type = Input, DataType dtype = Float32) : guid(generateGuid()), hash(generateHash()), dims(dims), outputOf(nullptr), data(nullptr), dtype(dtype), type(type), computed(NotComputed) { itInit(); } Tensor(const Tensor &rhs) : Tensor(rhs.dims, rhs.type, rhs.dtype) { outputOf = nullptr; data = nullptr; hash = rhs.hash; dimPenalty = rhs.dimPenalty; itInit(); } Tensor(VType scalar, TensorType type = Weight, DataType dtype = Float32) : guid(generateGuid()), hash(generateHash()), outputOf(nullptr), data(nullptr), dtype(dtype), type(type), computed(ComputedFull) { assert(size() == 1); dataMalloc(); data[0] = scalar; } ~Tensor() { if (data != nullptr) delete[] data; } // inputOf and outputOf will not be cloned Tensor *clone() { Tensor *t = new Tensor(*this); return t; } void clone(Tensor *t) { dims = t->dims; dtype = t->dtype; type = t->type; hash = t->hash; dimPenalty = t->dimPenalty; } DataType getDType() const { return dtype; } size_t getGuid() const { return guid; } void replace(Tensor &t) { hash = t.hash; } void refresh() { hash = generateHash(); } uint64_t getHash() const { 
return hash; } const Dim &getDims() const { return dims; } void setDims(const Dim &dms) { dims = dms; } void setInputOf(const OpVec &vec) { inputOf = vec; } void addInputOf(Operator *op) { inputOf.emplace_back(op); } void setOutputOf(Operator *op) { outputOf = op; } // TODO: more tensor state // if tensor is clear bool isClear() { return inputOf.empty() && outputOf == nullptr && type == Input && computed == NotComputed && splittingPoints.empty(); } // set tensor to clear state void clear() { inputOf.clear(); outputOf = nullptr; type = Input; computed = NotComputed; splittingPoints.clear(); hash = generateHash(); dimPenalty.clear(); } bool isComputed() const { return computed == ComputedFull; } void setComputed() { computed = ComputedFull; } bool isScalar() const { return dims.empty(); } bool isValid() const { return type != Invalid; } void setInvalid() { type = Invalid; } bool isNotCounted() const { return type == NotCounted; } void resetPenalty() { for (auto &i : dimPenalty) i = 0; dimPenalty.resize(dims.size(), 0); } const Dim &getPenalty() { if (dimPenalty.empty()) dimPenalty.resize(dims.size(), 0); return dimPenalty; } Dim getPenalty() const { return dimPenalty.empty() ? 
Dim(dims.size(), 0) : dimPenalty; } void addPenalty(int d, int penalty = 1) { if (dimPenalty.empty()) dimPenalty.resize(dims.size(), 0); dimPenalty[d] += penalty; } void setPenalty(const Dim &penalty) { dimPenalty.resize(penalty.size()); dimPenalty = penalty; } const OpVec &getInputOf() { return inputOf; } Operator *getOutputOf() { return outputOf; } std::pair<Operator *, int> getOutputOfWithIndex(); bool dataMalloc() { if (data == nullptr) data = new VType[size()]; return data != nullptr; } bool dataRand(int seed = 0) { if (data == nullptr) data = new VType[size()]; if (!random_inited) initFastrand(); // srand(seed); // faster rand generator; parallel size_t iEnd = size(); // std::cerr << "Init beginned " << std::endl; #pragma omp parallel for for (size_t i = 0; i < iEnd; ++i) data[i] = fastrand(random_seed[omp_get_thread_num() * 16]) % 10000; // std::cerr << "Init finished" << std::endl; computed = ComputedFull; return true; } bool setData(VType *dptr) { if (dptr == nullptr) return false; auto sz = size(); #pragma omp parallel for for (size_t i = 0; i < sz; ++i) data[i] = dptr[i]; computed = ComputedFull; return true; } bool setScalar(VType val) { if (data == nullptr || !dims.empty()) return false; data[0] = val; return true; } bool setData(const Dim &ds, VType val) { if (data == nullptr || ds.size() != dims.size()) return false; data[getOffset(ds)] = val; return true; } bool setData(size_t pos, VType val) { if (data == nullptr || pos >= size()) return false; data[pos] = val; return true; } VType getScalar() { return data == nullptr ? 0 : data[0]; } VType getData(const Dim &ds) { assert(data != nullptr); auto offset = getOffset(ds); return offset == (size_t)-1 ? 
0 : data[getOffset(ds)]; } VType getData(size_t pos) { assert(data != nullptr); assert(pos < size()); return data[pos]; } VType *getDataPtr() const { return data; } size_t getOffset(const Dim &ds) { auto nDim = ds.size(); assert(dims.size() == nDim); if (ds.empty()) return 0; for (size_t i = 0; i < nDim; ++i) if (ds[i] < 0 || ds[i] >= dims[i]) return (size_t)-1; size_t idx = ds[0]; size_t dm = 0; while (++dm < nDim) idx = idx * dims[dm] + ds[dm]; return idx; } VType getBroadcastData(const Dim &ds) { assert(data != nullptr); auto offset = getBroadcastOffset(ds); return offset == (size_t)-1 ? 0 : data[getOffset(ds)]; } VType getBroadcastData(size_t pos) { assert(data != nullptr); return data[pos % size()]; } size_t getBroadcastOffset(const Dim &ds) { assert(ds.size() >= dims.size()); auto nDim = dims.size(); auto nBroadcastDim = ds.size() - nDim; for (size_t i = 0; i < nDim; ++i) if (ds[nBroadcastDim + i] < 0 || ds[nBroadcastDim + i] >= dims[i]) return (size_t)-1; size_t idx = 0; for (size_t i = 0; i < nDim; ++i) idx = idx * dims[i] + ds[nBroadcastDim + i]; return idx; } void itInit() { it = Dim(dims.size(), 0); } void itReset() { itInit(); for (size_t i = 0, iEnd = it.size(); i < iEnd; ++i) it[i] = 0; } bool itValid() { if (it.size() != dims.size()) return false; for (size_t i = 0, iEnd = it.size(); i < iEnd; ++i) if (it[i] >= dims[i]) return false; return true; } const Dim &itGet() { return it; } void itNext() { auto p = it.size() - 1; it[p] += 1; while (p >= 1) { if (it[p] == dims[p]) { it[p] = 0; it[--p] += 1; } else break; } } size_t size() const { size_t sz = 1; auto dm = dims.size(); while (dm > 0) sz *= dims[--dm]; return sz; } TensorType getType() const { return type; } void setType(TensorType ty) { type = ty; } void print() { if (type == Invalid) { std::cout << "Invalid tensor" << std::endl; return; } if (data == nullptr || dims.size() == 0) { std::cout << "Empty tensor" << std::endl; return; } // TODO: can be uncommented after tensor's compute type is 
correctly set if (computed == NotComputed) { std::cout << "Uncomputed tensor" << std::endl; return; } std::cout << "Tensor: " << guid << std::endl; auto numDims = dims.size(); auto dimSzVec = std::vector<int>(numDims, 1); dimSzVec[numDims - 1] = dims[numDims - 1]; for (int i = numDims - 1; i != 0; --i) dimSzVec[i - 1] = dimSzVec[i] * dims[i - 1]; for (size_t i = 0, iEnd = size(); i < iEnd; ++i) { for (size_t j = 0; j + 1 < numDims; ++j) { if (i % dimSzVec[j] == 0) { std::cout << "["; } } std::cout << data[i]; for (size_t j = 0; j + 1 < numDims; ++j) { if ((int)i % dimSzVec[j] == dimSzVec[j] - 1) { std::cout << "]"; } } if (i != size() - 1) std::cout << ", "; if ((int)i % dimSzVec[numDims - 1] == dimSzVec[numDims - 1] - 1) std::cout << std::endl; } } static inline void initFastrand() { assert(omp_get_max_threads() <= 256); // srand(0); // constant seed for test // align random_seed to avoid false sharing for (int i = 0; i < 256 * 16; ++i) { // random_seed[i] = rand(); // constant random seed for test random_seed[i] = i; } random_inited = true; } static inline int fastrand(int &g_seed) { g_seed = (214013 * g_seed + 2531011); return (g_seed >> 16) & 0x7FFF; } std::vector<std::vector<int>> const *getSplittingPoints() const { assert(!splittingPoints.empty()); return &splittingPoints; } bool setSplittingPoints(std::vector<std::vector<int>> value) { assert(!value.empty()); splittingPoints = value; return true; } void printSplittingPoints() { if (splittingPoints.empty()) printf("Empty SplittingPoints"); else { printf("["); for (auto &vs : splittingPoints) { printf("["); for (auto v : vs) printf("%2d,", v); printf("],"); } printf("]"); } } void initSplittingPoints() { splittingPoints.resize(getDims().size()); } }; } // end of namespace tpm #endif // TENSOR_H
convolutiondepthwise_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Saturating float -> int8 conversion used by the requantization kernels:
// round to nearest, then clamp to the symmetric range [-127, 127].
static inline signed char float2int8(float v)
{
    int int32 = static_cast<int>(round(v));
    if (int32 > 127) return 127;
    if (int32 < -127) return -127;
    return (signed char)int32;
}

// Depthwise 3x3 convolution, stride 1, int8 input/weights, int32 output.
// Depthwise: output channel p reads only input channel p with its own 9
// weights. Results are ADDED into top_blob after a zero fill.
// Assumes no padding: bottom_blob is already at least (outw+2) x (outh+2).
static void convdw3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;

    // one independent output channel per OpenMP task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        out.fill(0);

        const signed char *kernel0 = (const signed char *)kernel + p * 9;
        int *outptr = out;

        const signed char *img0 = bottom_blob.channel(p);

        // the three consecutive input rows under the 3x3 window
        const signed char *r0 = img0;
        const signed char *r1 = img0 + w;
        const signed char *r2 = img0 + w * 2;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum = 0;

                sum += (int)r0[0] * (int)kernel0[0];
                sum += (int)r0[1] * (int)kernel0[1];
                sum += (int)r0[2] * (int)kernel0[2];
                sum += (int)r1[0] * (int)kernel0[3];
                sum += (int)r1[1] * (int)kernel0[4];
                sum += (int)r1[2] * (int)kernel0[5];
                sum += (int)r2[0] * (int)kernel0[6];
                sum += (int)r2[1] * (int)kernel0[7];
                sum += (int)r2[2] * (int)kernel0[8];

                *outptr += sum;

                r0++;
                r1++;
                r2++;
                outptr++;
            }

            // skip the 2 right-edge columns not covered by any window
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

// Depthwise 3x3 convolution, stride 2, int8 input/weights, int32 output.
// Same accumulation as the s1 kernel but the window steps 2 columns at a
// time and skips every other input row.
static void convdw3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after a row the pointers sit at w - (w - 2*outw) columns in; this
    // jump lands them at the start of the row TWO rows down (stride 2)
    const int tailstep = w - 2 * outw + w;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        out.fill(0);

        const signed char *kernel0 = (const signed char *)kernel + p * 9;
        int *outptr = out;

        const signed char *img0 = bottom_blob.channel(p);

        const signed char *r0 = img0;
        const signed char *r1 = img0 + w;
        const signed char *r2 = img0 + w * 2;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum = 0;

                sum += (int)r0[0] * (int)kernel0[0];
                sum += (int)r0[1] * (int)kernel0[1];
                sum += (int)r0[2] * (int)kernel0[2];
                sum += (int)r1[0] * (int)kernel0[3];
                sum += (int)r1[1] * (int)kernel0[4];
                sum += (int)r1[2] * (int)kernel0[5];
                sum += (int)r2[0] * (int)kernel0[6];
                sum += (int)r2[1] * (int)kernel0[7];
                sum += (int)r2[2] * (int)kernel0[8];

                *outptr += sum;

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}

// Depthwise 3x3 s1 with fused dequantization: float output
//   out = bias[p] + sum_int32 * scales_dequant[p]
// The per-channel bias is written first via out.fill(bias0) and the scaled
// sums are then ACCUMULATED on top of it.
static void convdw3x3s1_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        float *outptr = out;

        // empty bias Mat converts to a null pointer -> bias of 0
        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_dequant = scales_dequant[p];
        out.fill(bias0);

        const signed char *kernel0 = (const signed char *)kernel + p * 9;

        const signed char *img0 = bottom_blob.channel(p);

        const signed char *r0 = img0;
        const signed char *r1 = img0 + w;
        const signed char *r2 = img0 + w * 2;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum = 0;

                sum += (int)r0[0] * (int)kernel0[0];
                sum += (int)r0[1] * (int)kernel0[1];
                sum += (int)r0[2] * (int)kernel0[2];
                sum += (int)r1[0] * (int)kernel0[3];
                sum += (int)r1[1] * (int)kernel0[4];
                sum += (int)r1[2] * (int)kernel0[5];
                sum += (int)r2[0] * (int)kernel0[6];
                sum += (int)r2[1] * (int)kernel0[7];
                sum += (int)r2[2] * (int)kernel0[8];

                *outptr += (float)sum * scale_dequant;

                r0++;
                r1++;
                r2++;
                outptr++;
            }

            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

// Depthwise 3x3 s2 with fused dequantization (see the s1 variant for the
// output formula; see convdw3x3s2_int8_sse for the tailstep geometry).
static void convdw3x3s2_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt)
{
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int tailstep = w - 2 * outw + w;

    const signed char *kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        float *outptr = out;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_dequant = scales_dequant[p];
        out.fill(bias0);

        const signed char *kernel0 = (const signed char *)kernel + p * 9;

        const signed char *img0 = bottom_blob.channel(p);

        const signed char *r0 = img0;
        const signed char *r1 = img0 + w;
        const signed char *r2 = img0 + w * 2;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum = 0;

                sum += (int)r0[0] * (int)kernel0[0];
                sum += (int)r0[1] * (int)kernel0[1];
                sum += (int)r0[2] * (int)kernel0[2];
                sum += (int)r1[0] * (int)kernel0[3];
                sum += (int)r1[1] * (int)kernel0[4];
                sum += (int)r1[2] * (int)kernel0[5];
                sum += (int)r2[0] * (int)kernel0[6];
                sum += (int)r2[1] * (int)kernel0[7];
                sum += (int)r2[2] * (int)kernel0[8];

                *outptr += (float)sum * scale_dequant;

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}

// Depthwise 3x3 s1 with fused requantization: int8 output
//   out = float2int8((sum_int32 * scale_in + bias[p]) * scale_out)
// scales_requant packs per-channel (scale_in, scale_out) pairs. Unlike the
// dequant kernels the output is ASSIGNED, so no pre-fill is needed.
static void convdw3x3s1_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char *kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        signed char *outptr = out;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_requant_in = scales_requant[2*p];
        const float scale_requant_out = scales_requant[2*p+1];

        const signed char *kernel0 = (const signed char *)kernel + p * 9;

        const signed char *img0 = bottom_blob.channel(p);

        const signed char *r0 = img0;
        const signed char *r1 = img0 + w;
        const signed char *r2 = img0 + w * 2;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum = 0;

                sum += (int)r0[0] * (int)kernel0[0];
                sum += (int)r0[1] * (int)kernel0[1];
                sum += (int)r0[2] * (int)kernel0[2];
                sum += (int)r1[0] * (int)kernel0[3];
                sum += (int)r1[1] * (int)kernel0[4];
                sum += (int)r1[2] * (int)kernel0[5];
                sum += (int)r2[0] * (int)kernel0[6];
                sum += (int)r2[1] * (int)kernel0[7];
                sum += (int)r2[2] * (int)kernel0[8];

                *outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out);

                r0++;
                r1++;
                r2++;
                outptr++;
            }

            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

// Depthwise 3x3 s2 with fused requantization (see the s1 variant for the
// output formula; see convdw3x3s2_int8_sse for the tailstep geometry).
static void convdw3x3s2_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    //int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int tailstep = w - 2 * outw + w;

    const signed char *kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        signed char *outptr = out;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_requant_in = scales_requant[2*p];
        const float scale_requant_out = scales_requant[2*p+1];

        const signed char *kernel0 = (const signed char *)kernel + p * 9;

        const signed char *img0 = bottom_blob.channel(p);

        const signed char *r0 = img0;
        const signed char *r1 = img0 + w;
        const signed char *r2 = img0 + w * 2;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum = 0;

                sum += (int)r0[0] * (int)kernel0[0];
                sum += (int)r0[1] * (int)kernel0[1];
                sum += (int)r0[2] * (int)kernel0[2];
                sum += (int)r1[0] * (int)kernel0[3];
                sum += (int)r1[1] * (int)kernel0[4];
                sum += (int)r1[2] * (int)kernel0[5];
                sum += (int)r2[0] * (int)kernel0[6];
                sum += (int)r2[1] * (int)kernel0[7];
                sum += (int)r2[2] * (int)kernel0[8];

                *outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out);

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
GB_unaryop__identity_uint16_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint16_uint8
// op(A') function:  GB_tran__identity_uint16_uint8

// C type:   uint16_t
// A type:   uint8_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (uint8_t -> uint16_t, a value-preserving widening)
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise pass over anz entries, statically scheduled across nthreads;
// each entry p depends only on Ax [p].
GrB_Info GB_unop__identity_uint16_uint8
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c; the macros
// defined above (GB_CAST_OP et al.) specialize it for this type pair.
GrB_Info GB_tran__identity_uint16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
xnor_cpu.h
/*! * Copyright (c) 2017 by Contributors * \file xnor_cpu.h * \brief implementation of xnor-gemm operator for cpu * \author HPI-DeepLearning */ #ifndef MXNET_XNOR_CPU_H #define MXNET_XNOR_CPU_H #include <dmlc/logging.h> #include <mshadow/base.h> #include <stdlib.h> #include <inttypes.h> #include <assert.h> #include <limits.h> #include <tgmath.h> #include <unistd.h> #include <stdint.h> #include <string.h> namespace mxnet { namespace op { namespace xnor_cpu { // variable, position, value #define BIT_SET(var, pos, val) var |= (val << pos) //uint32_t, uint64_t, __int128 #if BINARY_WORD_32 == 1 typedef uint32_t BINARY_WORD; #endif #if BINARY_WORD_64 == 1 typedef uint64_t BINARY_WORD; #endif const int BITS_PER_BINARY_WORD (sizeof(BINARY_WORD) * CHAR_BIT); /** * @brief returns a mshadow dtype with corresponding bitwidth to BINARY_WORD * */ inline mshadow::TypeFlag corresponding_dtype() { if (BITS_PER_BINARY_WORD == 32) { return mshadow::kFloat32; } else if (BITS_PER_BINARY_WORD == 64) { return mshadow::kFloat64; } assert(false); return mshadow::kFloat32; } /** * @brief a helper method for print out bit wise result * of a binary_word * */ inline void print_int2Bin ( BINARY_WORD a ) { for (int i=0; i <BITS_PER_BINARY_WORD; i++ ) { if( a & (1 << i) ) std::cout << 1; else std::cout << 0; } std::cout<<std::endl; } inline void print_int2Bin64 ( uint64_t a ) { for (int i=0; i <64; i++ ) { if( a & (1 << i) ) std::cout << 1; else std::cout << 0; } std::cout<<std::endl; } /** * @brief this method scales the _popc(xnor(...)) result * into the dot(-1...1) result * Example: if scale range is 8, then * the dot product result based -1 and 1: * -8 -6 -4 -2 0 2 4 6 8 * XNOR&POPC result: * 0 1 2 3 4 5 6 7 8 * so the equa should be: * dot_ouput = 2 * xnor_output - scale_range */ inline float xnor_to_binary_dot ( float num, int scale_range) { return 2*num - scale_range; } /** * @brief gets the mean value over all elements of a weight volume * */ inline float get_alpha(float* weight, int 
width, int height, int depth) { float accum = 0.0f; for (int z = 0; z < depth; ++z) { for (int x = 0; x < width; ++x) { for (int y = 0; y < height; ++y) { accum += std::abs(weight[z * (width * height) + x * height + y]); } } } return accum / (float) (width * height * depth); } /** * @brief collects all mean values across all input filters (alpha value as described in xnor paper) * */ inline void get_alpha_plane(float* alpha_plane_out, float* weights, int num_weights, int kernel_width, int kernel_height, int input_depth) { for (int i = 0; i < num_weights; i++) { alpha_plane_out[i] = get_alpha(&weights[i * kernel_height * kernel_width * input_depth], kernel_height, kernel_width, input_depth); } } /** * @brief plane with mean off all input channels for input volume (A plane as described in xnor paper) * */ inline void get_A_planes(float* A_planes_out, float* input, int input_depth, int input_width, int input_height, int batch_size) { for (int i = 0; i < batch_size; i++) { for (int x = 0; x < input_width; ++x) { for (int y = 0; y < input_height; ++y) { float accum = 0.0f; for (int z = 0; z < input_depth; ++z) { accum += std::abs(input[i * (input_depth * input_width * input_height) + z * (input_width * input_height) + x * input_height + y]); } A_planes_out[i * input_width * input_height + x * input_height + y] = accum / (float) input_depth; } } } } /** * @brief A plane convolved with k which is defined as a w*h matrix where every * element is 1/(w*h) (K plane as described in xnor paper) * */ inline void get_K_planes(float* K_planes_out, float* A_planes, int input_width, int input_height, int kernel_width, int kernel_height, int batch_size) { int K_width = (input_width - kernel_width + 2 * 0/*padding*/) / 1/*stride*/ + 1; int K_height = (input_height - kernel_height + 2 * 0/*padding*/) / 1/*stride*/ + 1; //@todo: super naive "conv" (no real conv since our k matrix has same elements everywhere) for (int i = 0; i < batch_size; i ++) { // for every batch for (int kx = 0; kx 
< K_width; kx++) { for (int ky = 0; ky < K_height; ky++) { // for every kx, ky in our output plane float accum = 0; // we do collect the sum of all values covered by the kernel for (int ix = kx; ix < kx + kernel_width; ix++) { for (int iy = ky; iy < ky + kernel_height; iy++) { accum += A_planes[i * input_width * input_height + ix * input_height + iy]; } } // and multiply them with 1/(w * h) K_planes_out[i * K_width * K_height + kx * K_height + ky] = accum / ((float) kernel_height * kernel_width); } } } } /** * @brief pointwise multiplication of two same-size matrices * */ inline void pointwise_mul_mm(float *output, const float *input, int size){ for (int i = 0; i < size; i++) { output[i] *= input[i]; } } /** * @brief pointwise multiplication of matrix with a scalar * */ inline void pointwise_mul_scalar(float *output, const float scalar, int size){ for (int i = 0; i < size; i++) { output[i] *= scalar; } } /** * @brief binarize an array of floats via the sign function into a single BINARY_WORD * */ inline BINARY_WORD concatenate(float* array) { BINARY_WORD rvalue=0; BINARY_WORD sign; for (int i = 0; i < BITS_PER_BINARY_WORD; i++) { sign = (array[i]>=0); rvalue = rvalue | (sign<< (i)); } return rvalue; } /** * @brief binarize matrix * */ inline void get_binary_row(float* row, BINARY_WORD * b_row, int size){ #pragma omp parallel for for (int i = 0; i < size; i+=BITS_PER_BINARY_WORD) { BINARY_WORD rvalue=0; BINARY_WORD sign; for (int j = 0;j < BITS_PER_BINARY_WORD; ++j) { sign = (row[i+j]>=0); BIT_SET(rvalue, j, sign); } b_row[i/BITS_PER_BINARY_WORD] = rvalue; } } /** * @brief binarize matrix column wise * */ inline void get_binary_col(float* col, BINARY_WORD * b_col, int n, int k){ for(int y=0; y<(n/BITS_PER_BINARY_WORD); y++){ #pragma omp parallel for for(int x=0; x < k; ++x){ BINARY_WORD rvalue=0; BINARY_WORD sign; for(int b=0; b<BITS_PER_BINARY_WORD; ++b){ sign = (col[(y*BITS_PER_BINARY_WORD+b)*k + x]>=0); BIT_SET(rvalue, b, sign); } b_col[y*k + x] = rvalue; } } } 
/** * @brief binarize matrix column wise. * Loop unroll and using register vars. * ~30% performance improvement without openmp * compared with get_binary_col() method. */ inline void get_binary_col_unrolled(float* col, BINARY_WORD * b_col, int n, int k){ for(int y=0; y<(n/BITS_PER_BINARY_WORD); y++){ BINARY_WORD * y_col_pt = &b_col[y*k]; #pragma omp parallel for for(int x=0; x < k; x+=4){ register BINARY_WORD rvalue0=0, rvalue1=0, rvalue2=0, rvalue3=0; for(int b=0; b<BITS_PER_BINARY_WORD; b+=4){ register BINARY_WORD sign0, sign1, sign2, sign3, sign4, sign5, sign6, sign7, sign8, sign9, sign10, sign11, sign12, sign13, sign14, sign15; float* col_0 = &col[(y*BITS_PER_BINARY_WORD+b)*k + x]; float* col_1 = &col[(y*BITS_PER_BINARY_WORD+b+1)*k + x]; float* col_2 = &col[(y*BITS_PER_BINARY_WORD+b+2)*k + x]; float* col_3 = &col[(y*BITS_PER_BINARY_WORD+b+3)*k + x]; sign0 = (*col_0>=0); sign1 = (*col_1>=0); sign2 = (*col_2>=0); sign3 = (*col_3>=0); BIT_SET(rvalue0, b, sign0); BIT_SET(rvalue0, (b+1), sign1); BIT_SET(rvalue0, (b+2), sign2); BIT_SET(rvalue0, (b+3), sign3); sign4 = (*(col_0+1)>=0); sign5 = (*(col_1+1)>=0); sign6 = (*(col_2+1)>=0); sign7 = (*(col_3+1)>=0); BIT_SET(rvalue1, b, sign4); BIT_SET(rvalue1, (b+1), sign5); BIT_SET(rvalue1, (b+2), sign6); BIT_SET(rvalue1, (b+3), sign7); sign8 = (*(col_0+2)>=0); sign9 = (*(col_1+2)>=0); sign10 = (*(col_2+2)>=0); sign11 = (*(col_3+2)>=0); BIT_SET(rvalue2, b, sign8); BIT_SET(rvalue2, (b+1), sign9); BIT_SET(rvalue2, (b+2), sign10); BIT_SET(rvalue2, (b+3), sign11); sign12 = (*(col_0+3)>=0); sign13 = (*(col_1+3)>=0); sign14 = (*(col_2+3)>=0); sign15 = (*(col_3+3)>=0); BIT_SET(rvalue3, b, sign12); BIT_SET(rvalue3, (b+1), sign13); BIT_SET(rvalue3, (b+2), sign14); BIT_SET(rvalue3, (b+3), sign15); } BINARY_WORD * pnter = &y_col_pt[x]; *pnter = rvalue0; *(pnter+1) = rvalue1; *(pnter+2) = rvalue2; *(pnter+3) = rvalue3; } } } /** * @brief based-line xnor-gemm implementation without * dot product, but use XNOR and POPCNT * 
__builtin_popcountll suitable for both 32bit and 64bit * * */ void xnor_gemm(int M, int N, int K, BINARY_WORD *A, int lda, BINARY_WORD *B, int ldb, float *C, int ldc); /** * @brief simple naive baseline gemm implementation * */ inline void baseline_gemm(int M, int K, int N, float *A, int lda, float *B, int ldb, float *C, int ldc){ int i,n,k; for(i = 0; i < M; ++i){ for(n = 0; n < N; ++n){ float A_PART = A[i*lda+n]; for(k = 0; k < K; ++k){ C[i*ldc+k] += A_PART * B[n*ldb+k]; } } } } } //namespace xnor_cpu } //namespace op } //namespace mxnet #endif //MXNET_XNOR_CPU_H
pi_mc_par.c
#include <stdio.h> #include <omp.h> #include "random.h" static long num_trials = 100000000; int main () { long i; long Ncirc = 0; double pi, x, y, test, time; double r = 1.0; // radius of circle. Side of squrare is 2*r time = omp_get_wtime(); #pragma omp parallel { #pragma omp single printf(" %d threads ",omp_get_num_threads()); seed(-r, r); #pragma omp for reduction(+:Ncirc) private(x,y,test) for(i=0;i<num_trials; i++) { x = drandom(); y = drandom(); test = x*x + y*y; if (test <= r*r) Ncirc++; } } pi = 4.0 * ((double)Ncirc/(double)num_trials); printf("\n %ld trials, pi is %lf ",num_trials, pi); printf(" in %lf seconds\n",omp_get_wtime()-time); return 0; }
convolution_sgemm_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// GEMM stage of im2col convolution for MIPS MSA, pack4 layout (each logical
// element is a vector of 4 floats).
//
//   bottom_im2col : im2col-unrolled input, w = size (= outw*outh),
//                   h = maxk (= kernel_w*kernel_h), c = inch, pack4
//   top_blob      : output feature maps, one channel per output channel, pack4
//   kernel        : per-output-channel weights read via kernel.channel(p);
//                   assumed pre-packed elsewhere to match the traversal
//                   order below -- the packing routine is not in this file
//   _bias         : optional bias, 4 floats per output channel
//                   (empty Mat => bias pointer is NULL, sums start at zero)
//
// Step 1 repacks columns of bottom_im2col into tiles of 12/8/4/2/1 output
// positions so that step 2 (the actual matrix multiply) reads sequentially.
static void im2col_sgemm_pack4_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const float* bias = _bias;

    // permute
    // tmp holds the retiled input: one channel per 12/8/4/2/1-wide tile,
    // sized by how many tiles of each width `size` decomposes into.
    Mat tmp;
    if (size >= 12)
        tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator);
    else if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 4u * 4, 4, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u * 4, 4, opt.workspace_allocator);
    {
        int remain_size_start = 0;
        int nn_size = size / 12;

        // ---- 12-wide tiles: transpose each 4x12 pack4 group so the 12
        // output positions become contiguous ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 12;

            float* tmpptr = tmp.channel(i / 12);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x12
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);
                    v4f32 _r8 = (v4f32)__msa_ld_w(img0 + 4 * 8, 0);
                    v4f32 _r9 = (v4f32)__msa_ld_w(img0 + 4 * 9, 0);
                    v4f32 _ra = (v4f32)__msa_ld_w(img0 + 4 * 10, 0);
                    v4f32 _rb = (v4f32)__msa_ld_w(img0 + 4 * 11, 0);

                    // interleave pairs of rows (low/high halves), then pairs
                    // of 64-bit lanes, yielding the 12x4 transposed layout
                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8);
                    v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra);
                    v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r);
                    v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l);
                    v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0);
                    __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0);
                    __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0);

                    img0 += size * 4;
                    tmpptr += 48;
                }
            }
        }

        remain_size_start += nn_size * 12;
        nn_size = (size - remain_size_start) >> 3;

        // ---- 8-wide tiles ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x8
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);
                    v4f32 _r4 = (v4f32)__msa_ld_w(img0 + 4 * 4, 0);
                    v4f32 _r5 = (v4f32)__msa_ld_w(img0 + 4 * 5, 0);
                    v4f32 _r6 = (v4f32)__msa_ld_w(img0 + 4 * 6, 0);
                    v4f32 _r7 = (v4f32)__msa_ld_w(img0 + 4 * 7, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4);
                    v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6);
                    v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r);
                    v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l);
                    v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0);
                    __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0);
                    __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0);

                    img0 += size * 4;
                    tmpptr += 32;
                }
            }
        }

        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;

        // ---- 4-wide tiles ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x4
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);
                    v4f32 _r2 = (v4f32)__msa_ld_w(img0 + 4 * 2, 0);
                    v4f32 _r3 = (v4f32)__msa_ld_w(img0 + 4 * 3, 0);

                    v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2);
                    v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2);
                    v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r);
                    v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l);
                    v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l);

                    __msa_st_w((v4i32)_r0123_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0);
                    __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0);
                    __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0);

                    img0 += size * 4;
                    tmpptr += 16;
                }
            }
        }

        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;

        // ---- 2-wide tiles ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x2
                    v4f32 _r0 = (v4f32)__msa_ld_w(img0, 0);
                    v4f32 _r1 = (v4f32)__msa_ld_w(img0 + 4, 0);

                    v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0);
                    v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0);

                    __msa_st_w((v4i32)_r01_0, tmpptr, 0);
                    __msa_st_w((v4i32)_r01_1, tmpptr + 4, 0);

                    img0 += size * 4;
                    tmpptr += 8;
                }
            }
        }

        remain_size_start += nn_size << 1;

        // ---- single columns: plain copy, no transpose needed ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    v4f32 _val = (v4f32)__msa_ld_w(img0, 0);
                    __msa_st_w((v4i32)_val, tmpptr, 0);

                    img0 += size * 4;
                    tmpptr += 4;
                }
            }
        }
    }

    // Step 2: per output channel, multiply-accumulate the retiled input
    // against the packed weights, 12/8/4/2/1 output positions at a time.
    // Each j step consumes one scalar per output position and one 4-float
    // weight vector (nn = inch * maxk * 4 scalar lanes along the reduction).
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        int i = 0;
        for (; i + 11 < size; i += 12)
        {
            const float* tmpptr = tmp.channel(i / 12);
            const float* kptr0 = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
            v4f32 _sum1 = _sum0;
            v4f32 _sum2 = _sum0;
            v4f32 _sum3 = _sum0;
            v4f32 _sum4 = _sum0;
            v4f32 _sum5 = _sum0;
            v4f32 _sum6 = _sum0;
            v4f32 _sum7 = _sum0;
            v4f32 _sum8 = _sum0;
            v4f32 _sum9 = _sum0;
            v4f32 _suma = _sum0;
            v4f32 _sumb = _sum0;

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 96);
                __builtin_prefetch(kptr0 + 32);
                v4i32 _val0123 = __msa_ld_w(tmpptr, 0);
                v4i32 _val4567 = __msa_ld_w(tmpptr + 4, 0);
                v4i32 _val89ab = __msa_ld_w(tmpptr + 8, 0);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                // broadcast each input scalar and fma against the weight row
                _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
                _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
                _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
                _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
                _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);
                _sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0);
                _sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0);
                _suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0);
                _sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0);

                tmpptr += 12;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
            __msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
            __msa_st_w((v4i32)_sum4, outptr0 + 4 * 4, 0);
            __msa_st_w((v4i32)_sum5, outptr0 + 4 * 5, 0);
            __msa_st_w((v4i32)_sum6, outptr0 + 4 * 6, 0);
            __msa_st_w((v4i32)_sum7, outptr0 + 4 * 7, 0);
            __msa_st_w((v4i32)_sum8, outptr0 + 4 * 8, 0);
            __msa_st_w((v4i32)_sum9, outptr0 + 4 * 9, 0);
            __msa_st_w((v4i32)_suma, outptr0 + 4 * 10, 0);
            __msa_st_w((v4i32)_sumb, outptr0 + 4 * 11, 0);

            outptr0 += 4 * 12;
        }
        for (; i + 7 < size; i += 8)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
            const float* kptr0 = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
            v4f32 _sum1 = _sum0;
            v4f32 _sum2 = _sum0;
            v4f32 _sum3 = _sum0;
            v4f32 _sum4 = _sum0;
            v4f32 _sum5 = _sum0;
            v4f32 _sum6 = _sum0;
            v4f32 _sum7 = _sum0;

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 64);
                __builtin_prefetch(kptr0 + 32);
                v4i32 _val0123 = __msa_ld_w(tmpptr, 0);
                v4i32 _val4567 = __msa_ld_w(tmpptr + 4, 0);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);
                _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0);
                _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0);
                _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0);
                _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0);

                tmpptr += 8;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
            __msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);
            __msa_st_w((v4i32)_sum4, outptr0 + 4 * 4, 0);
            __msa_st_w((v4i32)_sum5, outptr0 + 4 * 5, 0);
            __msa_st_w((v4i32)_sum6, outptr0 + 4 * 6, 0);
            __msa_st_w((v4i32)_sum7, outptr0 + 4 * 7, 0);

            outptr0 += 4 * 8;
        }
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
            const float* kptr0 = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
            v4f32 _sum1 = _sum0;
            v4f32 _sum2 = _sum0;
            v4f32 _sum3 = _sum0;

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 32);
                __builtin_prefetch(kptr0 + 32);
                v4i32 _val0123 = __msa_ld_w(tmpptr, 0);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0);
                _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0);
                _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0);
                _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0);

                tmpptr += 4;
                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);
            __msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0);
            __msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0);

            outptr0 += 4 * 4;
        }
        for (; i + 1 < size; i += 2)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
            const float* kptr0 = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum0 = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);
            v4f32 _sum1 = _sum0;

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = __msa_fill_w_f32(*tmpptr++);
                v4f32 _val1 = __msa_fill_w_f32(*tmpptr++);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val0, _w0);
                _sum1 = __msa_fmadd_w(_sum1, _val1, _w0);

                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr0 + 4, 0);

            outptr0 += 4 * 2;
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
            const float* kptr0 = kernel.channel(p);

            int nn = inch * maxk * 4; // inch always > 0

            v4f32 _sum = bias ? (v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0);

            for (int j = 0; j < nn; j++)
            {
                __builtin_prefetch(tmpptr + 8);
                __builtin_prefetch(kptr0 + 32);
                v4f32 _val0 = __msa_fill_w_f32(*tmpptr++);
                v4f32 _w0 = (v4f32)__msa_ld_w(kptr0, 0);
                _sum = __msa_fmadd_w(_sum, _val0, _w0);

                kptr0 += 4;
            }

            __msa_st_w((v4i32)_sum, outptr0, 0);

            outptr0 += 4;
        }
    }
}

// Full im2col + sgemm convolution entry point (pack4, MSA).
// Unrolls each (dilated, strided) kernel-window sample into a row of
// bottom_im2col, then dispatches to im2col_sgemm_pack4_msa above.
// Padding is assumed to have been applied by the caller (top_blob's
// outw/outh already account for it -- TODO confirm against call site).
static void convolution_im2col_sgemm_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 4u * 4, 4, opt.workspace_allocator);
    {
        // gap = floats to skip from the end of one output row's reads to the
        // start of the next strided input row (pack4, hence * 4)
        const int gap = (w * stride_h - outw * stride_w) * 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            // one im2col row per kernel tap (u, v)
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v * 4;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            v4f32 _val = (v4f32)__msa_ld_w(sptr, 0);
                            __msa_st_w((v4i32)_val, ptr, 0);

                            sptr += stride_w * 4;
                            ptr += 4;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack4_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
ztrmm.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_trmm
 *
 *  Performs a triangular matrix-matrix multiply of the form
 *
 *          \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft  or
 *          \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
 *
 *  where op( X ) is one of:
 *
 *  - op(A) = A   or
 *  - op(A) = A^T or
 *  - op(A) = A^H
 *
 *  alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
 *  or lower triangular matrix.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Specifies whether op( A ) appears on the left or on the right of B:
 *          - PlasmaLeft:  alpha*op( A )*B
 *          - PlasmaRight: alpha*B*op( A )
 *
 * @param[in] uplo
 *          Specifies whether the matrix A is upper triangular or lower
 *          triangular:
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] transa
 *          Specifies whether the matrix A is transposed, not transposed or
 *          conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] diag
 *          Specifies whether or not A is unit triangular:
 *          - PlasmaNonUnit: A is non-unit triangular;
 *          - PlasmaUnit:    A is unit triangular.
 *
 * @param[in] m
 *          The number of rows of matrix B.
 *          m >= 0.
 *
 * @param[in] n
 *          The number of columns of matrix B.
 *          n >= 0.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] pA
 *          The triangular matrix A of dimension lda-by-k, where k is m when
 *          side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
 *          PlasmaUpper, the leading k-by-k upper triangular part of the array
 *          A contains the upper triangular matrix, and the strictly lower
 *          triangular part of A is not referenced. If uplo = PlasmaLower, the
 *          leading k-by-k lower triangular part of the array A contains the
 *          lower triangular matrix, and the strictly upper triangular part of
 *          A is not referenced. If diag = PlasmaUnit, the diagonal elements of
 *          A are also not referenced and are assumed to be 1.
 *
 * @param[in] lda
 *          The leading dimension of the array A. When side='L' or 'l',
 *          lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
 *
 * @param[in,out] pB
 *          On entry, the matrix B of dimension ldb-by-n.
 *          On exit, the result of a triangular matrix-matrix multiply
 *          ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_ztrmm
 * @sa plasma_ctrmm
 * @sa plasma_dtrmm
 * @sa plasma_strmm
 *
 ******************************************************************************/
int plasma_ztrmm(plasma_enum_t side, plasma_enum_t uplo,
                 plasma_enum_t transa, plasma_enum_t diag,
                 int m, int n,
                 plasma_complex64_t alpha, plasma_complex64_t *pA, int lda,
                                           plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    // Error codes are the negated 1-based argument positions
    // (side=1, ..., alpha=7, pA=8, lda=9, pB=10, ldb=11).
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        return -1;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (transa != PlasmaConjTrans && transa != PlasmaNoTrans &&
        transa != PlasmaTrans ) {
        plasma_error("illegal value of transa");
        return -3;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        return -4;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -5;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -6;
    }

    // Order of A depends on the side it is applied from.
    int na;
    if (side == PlasmaLeft)
        na = m;
    else
        na = n;

    if (lda < imax(1, na)) {
        plasma_error("illegal value of lda");
        return -9;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -11;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        na, na, 0, 0, na, na, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Release both descriptors; the original code leaked them here.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate matrices to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);

        // Call tile async interface.
        plasma_omp_ztrmm(side, uplo,
                         transa, diag,
                         alpha, A,
                                B,
                         sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_trmm
 *
 *  Performs triangular matrix multiplication. Non-blocking tile version of
 *  plasma_ztrmm(). May return before the computation is finished. Operates on
 *  matrices stored by tiles. All matrices are passed through descriptors. All
 *  dimensions are taken from the descriptors. Allows for pipelining of
 *  operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] side
 *          Specifies whether op( A ) appears on the left or on the right of B:
 *          - PlasmaLeft:  alpha*op( A )*B
 *          - PlasmaRight: alpha*B*op( A )
 *
 * @param[in] uplo
 *          Specifies whether the matrix A is upper triangular or lower
 *          triangular:
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] transa
 *          Specifies whether the matrix A is transposed, not transposed or
 *          conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] diag
 *          Specifies whether or not A is unit triangular:
 *          - PlasmaNonUnit: A is non-unit triangular;
 *          - PlasmaUnit:    A is unit triangular.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          Descriptor of the triangular matrix A.
 *
 * @param[in,out] B
 *          Descriptor of matrix B.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_ztrmm
 * @sa plasma_omp_ctrmm
 * @sa plasma_omp_dtrmm
 * @sa plasma_omp_strmm
 *
 ******************************************************************************/
void plasma_omp_ztrmm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_enum_t transa, plasma_enum_t diag,
                      plasma_complex64_t alpha, plasma_desc_t A,
                                                plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }

    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (transa != PlasmaConjTrans && transa != PlasmaNoTrans &&
        transa != PlasmaTrans) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    // NOTE(review): returning on alpha == 0.0 leaves B untouched, whereas
    // BLAS trmm semantics would set B to zero -- confirm against the
    // reference implementation before changing.
    if (A.m == 0 || A.n == 0 || alpha == 0.0 || B.m == 0 || B.n == 0)
        return;

    // Call parallel function.
    plasma_pztrmm(side, uplo,
                  transa, diag,
                  alpha, A,
                         B,
                  sequence, request);
}
test_parallel_reduction.c
//===-- test_parallel_reduction.c - Test reductions at parallel ---*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
// This file has been modified from the file
// openmp/runtime/test/tasking/omp_parallel_reduction.c
// of the LLVM project (https://github.com/llvm/llvm-project)
// under the Apache License v2.0 with LLVM Exceptions.
//
//===----------------------------------------------------------------------===//

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "tests.h"

#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */

// Exercises every reduction operator (+, -, *, &&, ||, &, |, ^) on a
// `parallel for` with dynamic scheduling, comparing each parallel result
// against a serially-computed known value.  Returns nonzero (true) on
// success, 0 if any reduction produced a wrong result.
int test_omp_parallel_reduction(void) {
  int sum;
  int known_sum;
  double dsum;
  double dknown_sum;
  double dt = 0.5; /* base of geometric row for + and - test*/
  double rounding_error = 1.E-9;
  int diff;
  double ddiff;
  int product;
  int known_product;
  int logic_and;
  int logic_or;
  int bit_and;
  int bit_or;
  int exclusiv_bit_or;
  int logics[LOOPCOUNT];
  int i;
  double dpt;
  int result;

  sum = 0;
  dsum = 0;
  product = 1;
  logic_and = 1;
  logic_or = 0;
  bit_and = 1;
  bit_or = 0;
  exclusiv_bit_or = 0;
  result = 0;
  dt = 1. / 3.;
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;

  /* Tests for integers */
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(+ : sum)
  for (i = 1; i <= LOOPCOUNT; i++) {
    sum = sum + i;
  }
  if (known_sum != sum) {
    result++;
    fprintf(stderr, "Error in sum with integers: Result was %d instead of %d\n",
            sum, known_sum);
  }

  diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(- : diff)
  for (i = 1; i <= LOOPCOUNT; ++i) {
    diff = diff - i;
  }
  if (diff != 0) {
    result++;
    fprintf(stderr,
            "Error in difference with integers: Result was %d instead of 0.\n",
            diff);
  }

  /* Tests for doubles */
  dsum = 0;
  dpt = 1;
  /* Serial reference: geometric series sum of dt^1..dt^DOUBLE_DIGITS-ish,
   * via the closed form (1 - dt^DOUBLE_DIGITS) / (1 - dt). */
  for (i = 0; i < DOUBLE_DIGITS; ++i) {
    dpt *= dt;
  }
  dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(+ : dsum)
  for (i = 0; i < DOUBLE_DIGITS; ++i) {
    dsum += pow(dt, i);
  }
  /* Floating-point reductions reassociate, so compare within a tolerance. */
  if (fabs(dsum - dknown_sum) > rounding_error) {
    result++;
    fprintf(stderr,
            "Error in sum with doubles: Result was %f instead of %f "
            "(Difference: %E)\n",
            dsum, dknown_sum, dsum - dknown_sum);
  }

  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i) {
    dpt *= dt;
  }
  // fprintf(stderr,"\n");
  ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(- : ddiff)
  for (i = 0; i < DOUBLE_DIGITS; ++i) {
    ddiff -= pow(dt, i);
  }
  if (fabs(ddiff) > rounding_error) {
    result++;
    fprintf(stderr,
            "Error in Difference with doubles: Result was %E instead of 0.0\n",
            ddiff);
  }

  /* Tests for product of integers */
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(* : product)
  for (i = 1; i <= MAX_FACTOR; i++) {
    product *= i;
  }
  known_product = KNOWN_PRODUCT;
  if (known_product != product) {
    result++;
    fprintf(stderr,
            "Error in Product with integers: Result was %d instead of %d\n\n",
            product, known_product);
  }

  /* Tests for logical and: part 1 all-ones (expect true), part 2 plants a
   * single zero in the middle (expect false). */
  for (i = 0; i < LOOPCOUNT; i++) {
    logics[i] = 1;
  }
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)
  for (i = 0; i < LOOPCOUNT; ++i) {
    logic_and = (logic_and && logics[i]);
  }
  if (!logic_and) {
    result++;
    fprintf(stderr, "Error in logic AND part 1.\n");
  }

  logic_and = 1;
  logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)
  for (i = 0; i < LOOPCOUNT; ++i) {
    logic_and = logic_and && logics[i];
  }
  if (logic_and) {
    result++;
    fprintf(stderr, "Error in logic AND part 2.\n");
  }

  /* Tests for logical or */
  for (i = 0; i < LOOPCOUNT; i++) {
    logics[i] = 0;
  }
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(|| \
                                                                   : logic_or)
  for (i = 0; i < LOOPCOUNT; ++i) {
    logic_or = logic_or || logics[i];
  }
  if (logic_or) {
    result++;
    fprintf(stderr, "Error in logic OR part 1.\n");
  }

  logic_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(|| \
                                                                   : logic_or)
  for (i = 0; i < LOOPCOUNT; ++i) {
    logic_or = logic_or || logics[i];
  }
  if (!logic_or) {
    result++;
    fprintf(stderr, "Error in logic OR part 2.\n");
  }

  /* Tests for bitwise and */
  for (i = 0; i < LOOPCOUNT; ++i) {
    logics[i] = 1;
  }
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(& : bit_and)
  for (i = 0; i < LOOPCOUNT; ++i) {
    bit_and = (bit_and & logics[i]);
  }
  if (!bit_and) {
    result++;
    fprintf(stderr, "Error in BIT AND part 1.\n");
  }

  bit_and = 1;
  logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(& : bit_and)
  for (i = 0; i < LOOPCOUNT; ++i) {
    bit_and = bit_and & logics[i];
  }
  if (bit_and) {
    result++;
    fprintf(stderr, "Error in BIT AND part 2.\n");
  }

  for (i = 0; i < LOOPCOUNT; i++) {
    logics[i] = 0;
  }

  /* Tests for bitwise or */
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(| : bit_or)
  for (i = 0; i < LOOPCOUNT; ++i) {
    bit_or = bit_or | logics[i];
  }
  if (bit_or) {
    result++;
    fprintf(stderr, "Error in BIT OR part 1\n");
  }
  bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic, 1) private(i) reduction(| : bit_or)
  for (i = 0; i < LOOPCOUNT; ++i) {
    bit_or = bit_or | logics[i];
  }
  if (!bit_or) {
    result++;
    fprintf(stderr, "Error in BIT OR part 2\n");
  }

  for (i = 0; i < LOOPCOUNT; i++) {
    logics[i] = 0;
  }

  /* Tests for bitwise xor */
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)
  for (i = 0; i < LOOPCOUNT; ++i) {
    exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
  }
  if (exclusiv_bit_or) {
    result++;
    fprintf(stderr, "Error in EXCLUSIVE BIT OR part 1\n");
  }

  exclusiv_bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)
  for (i = 0; i < LOOPCOUNT; ++i) {
    exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
  }
  if (!exclusiv_bit_or) {
    result++;
    fprintf(stderr, "Error in EXCLUSIVE BIT OR part 2\n");
  }
  /*printf("\nResult:%d\n",result);*/
  return (result == 0);
}

// Runs the reduction test REPETITIONS times; exits nonzero if any run failed.
int main(void) {
  int i;
  int num_failed = 0;

  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_parallel_reduction()) {
      num_failed++;
    }
  }

  if (num_failed == 0) {
    printf("***PASSED***\n");
  } else {
    printf("***FAILED***: %d failures\n", num_failed);
  }
  return num_failed != 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
bridge.h
// This file is a bridge connecting the "lib interface" gbbs exports and the
// interface that the current parlaylib exports.
#pragma once

#include <type_traits>
#include <utility>

#include "parlay/delayed_sequence.h"
#include "parlay/internal/binary_search.h"
#include "parlay/internal/get_time.h"
#include "parlay/io.h"
#include "parlay/monoid.h"
#include "parlay/parallel.h"
#include "parlay/primitives.h"
#include "parlay/random.h"
#include "parlay/range.h"
#include "parlay/sequence.h"
#include "parlay/slice.h"
#include "parlay/utilities.h"

namespace gbbs {
// ================== parallel primitives ===================
using parlay::parallel_for;
using parlay::par_do;
using parlay::num_workers;
using parlay::worker_id;

// parallel loop from start (inclusive) to end (exclusive) running
// function f.
//    f should map long to void.
//    granularity is the number of iterations to run sequentially
//      if 0 (default) then the scheduler will decide
//    conservative uses a safer scheduler
// Legacy-argument-order overload: forwards to parlay::parallel_for, which
// takes granularity after f.
template <typename F>
inline void parallel_for(size_t start, size_t end, long granularity, F f,
                         bool conservative = false) {
  return parallel_for(start, end, f, granularity, conservative);
}

// Parallel loop where each worker gets a scratch object of type A, created by
// init_alloc and (depending on backend) finalized by finish_alloc; f receives
// (index, A*).  The definition selected below depends on the threading
// backend (CILK / OPENMP / default parlay scheduler).
template <typename A, typename Af, typename Df, typename F>
static void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity = 0,
                               bool conservative = false);

#ifdef CILK
// TODO try parallel_for_1
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  alloc_holder<A> alloc;

  parallel_for_1(start, end,
                 [&](size_t i) {
                   init_alloc(&alloc.imp_.view());
                   f(i, &(alloc.imp_.view()));
                   // finish_alloc(&(alloc.imp_.view()));
                 },
                 granularity, conservative);
}
#else
#ifdef OPENMP
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  A* alloc = nullptr;
#pragma omp parallel private(alloc)
  {
    // One A per OpenMP thread, created inside the parallel region.
    alloc = new A();
    init_alloc(alloc);
    parallel_for_1(start, end, [&](size_t i) { f(i, alloc); }, granularity,
                   conservative);
    //#pragma omp for schedule(dynamic, 1) nowait
    // for(long i=start; i<end; i++) f(i, alloc);
    finish_alloc(alloc);
  }
}
#else
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
                               long end, F f, long granularity,
                               bool conservative) {
  parallel_for(start, end,
               [&](long i) {
                 // thread_local: one A per worker thread, reused across
                 // iterations and intentionally never deleted.
                 static thread_local A* alloc = new A();
                 init_alloc(alloc);
                 f(i, alloc);
               },
               granularity, conservative);
  // finish_alloc(alloc);
}
#endif
#endif

// Allocates raw (uninitialized) storage for n objects of type E.
template <class E>
E* new_array_no_init(size_t n) {
#ifndef PARLAY_USE_STD_ALLOC
  auto allocator = parlay::allocator<E>();
#else
  auto allocator = std::allocator<E>();
#endif
  return allocator.allocate(n);
}

// Initializes in parallel
template <typename E>
E* new_array(size_t n) {
  E* r = new_array_no_init<E>(n);
  if (!std::is_trivially_default_constructible<E>::value) {
    // if (!std::is_default_constructible<E>::value) {
    if (n > 2048) {
      auto f = [&](size_t i) { new ((void*)(r + i)) E; };
      parallel_for(0, n, f);
    } else
      for (size_t i = 0; i < n; i++) new ((void*)(r + i)) E;
  }
  return r;
}

// Deallocates storage obtained from new_array_no_init/new_array.
// NOTE(review): does not run destructors — callers must destroy non-trivial
// elements themselves before freeing.
template <class E>
void free_array(E* e, size_t n) {
#ifndef PARLAY_USE_STD_ALLOC
  auto allocator = parlay::allocator<E>();
#else
  auto allocator = std::allocator<E>();
#endif
  allocator.deallocate(e, n);
}

// Alias template for parlay::sequence
template <typename T>
using sequence = parlay::sequence<T>;

// Bit shorter than writing slice<T*, T*> everywhere.
template <class T>
using slice = parlay::slice<T*, T*>;

// Views an entire container as a parlay slice.
template <typename Seq>
auto make_slice(const Seq& S) {
  return parlay::make_slice(S.begin(), S.end());
}

// Views the half-open pointer range [start, end) as a slice.
template <class E>
slice<E> make_slice(E* start, E* end) {
  return parlay::make_slice((E*)start, (E*)end);
}

// Views n elements starting at A as a slice.
template <class T>
inline slice<T> make_slice(T* A, size_t n) {
  return parlay::make_slice((T*)A, (T*)(A + n));
}

struct empty {};  // struct containing no data (used for empty base optimization)

// ========================= timer ==========================
using parlay::internal::timer;

// ========================= atomic ops ==========================

// Currently unused, but may be useful in the future; including commented out.
// template <class ET>
// inline bool CAS128(ET* a, ET b, ET c) {
//   return __sync_bool_compare_and_swap_16((__int128*)a, *((__int128*)&b),
//                                          *((__int128*)&c));
// }

// Relaxed atomic read of *a (GCC __atomic builtin; no ordering guarantees).
template <typename ET>
inline ET atomic_load(ET* a) {
  ET tmp;
  __atomic_load(a, &tmp, __ATOMIC_RELAXED);
  return tmp;
}

// Relaxed atomic write of b into *a.
template <typename ET>
inline void atomic_store(ET* a, ET b) {
  __atomic_store(a, &b, __ATOMIC_RELAXED);
}

// Compare-and-swap on arbitrary 1/4/8/16-byte types: the operands are
// bit-copied (memcpy) into same-sized integers and swapped with the matching
// __sync builtin.  Returns true iff *a was equal to oldval and was replaced.
template <typename ET>
inline bool atomic_compare_and_swap(ET* a, ET oldval, ET newval) {
  if constexpr(sizeof(ET) == 1) {
    uint8_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<uint8_t*>(a), r_oval,
                                        r_nval);
  } else if constexpr(sizeof(ET) == 4) {
    uint32_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<uint32_t*>(a), r_oval,
                                        r_nval);
  } else if constexpr(sizeof(ET) == 8) {
    uint64_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<uint64_t*>(a), r_oval,
                                        r_nval);
  } else if constexpr(sizeof(ET) == 16) {
    __int128 r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap_16(reinterpret_cast<__int128*>(a),
                                           r_oval, r_nval);
  } else {
    // Unsupported width: fail loudly at runtime.
    std::cout << "Bad CAS Length" << sizeof(ET) << std::endl;
    exit(0);
  }
}

// volatile-pointer overload of the CAS above (plain `if` instead of
// `if constexpr`; dead branches are eliminated by the optimizer).
template <typename ET>
inline bool atomic_compare_and_swap(volatile ET* a, ET oldval, ET newval) {
  if (sizeof(ET) == 1) {
    uint8_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<volatile uint8_t*>(a),
                                        r_oval, r_nval);
  } else if (sizeof(ET) == 4) {
    uint32_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<volatile uint32_t*>(a),
                                        r_oval, r_nval);
  } else if (sizeof(ET) == 8) {
    uint64_t r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap(reinterpret_cast<volatile uint64_t*>(a),
                                        r_oval, r_nval);
  } else if (sizeof(ET) == 16) {
    __int128 r_oval, r_nval;
    std::memcpy(&r_oval, &oldval, sizeof(ET));
    std::memcpy(&r_nval, &newval, sizeof(ET));
    return __sync_bool_compare_and_swap_16(
        reinterpret_cast<volatile __int128*>(a), r_oval, r_nval);
  } else {
    std::cout << "Bad CAS Length" << sizeof(ET) << std::endl;
    exit(0);
  }
}

// Atomically adds b to *a via a CAS retry loop; returns the previous value.
template <typename E, typename EV>
inline E fetch_and_add(E* a, EV b) {
  volatile E newV, oldV;
  do {
    oldV = *a;
    newV = oldV + b;
  } while (!atomic_compare_and_swap(a, oldV, newV));
  return oldV;
}

// Atomically increment *a by b unless the value is larger than max_v.
// If the increment is successful, it returns an opption containing
// the previous value and returns an empty std::optional<E> otherwise.
template <typename E, typename EV>
inline std::optional<E> fetch_and_add_threshold(E* a, EV b, EV max_v) {
  volatile E newV, oldV;
  oldV = *a;
  newV = oldV + b;
  // Retry until either the CAS lands or the observed value exceeds max_v.
  while (oldV <= max_v) {
    if (atomic_compare_and_swap(a, oldV, newV)) return oldV;
    oldV = atomic_load(a);
    newV = oldV + b;
  }
  return std::nullopt;
}

// Atomically adds b to *a (CAS retry loop); discards the old value.
template <typename E, typename EV>
inline void write_add(E* a, EV b) {
  // volatile E newV, oldV;
  E newV, oldV;
  do {
    oldV = *a;
    newV = oldV + b;
  } while (!atomic_compare_and_swap(a, oldV, newV));
}

// std::atomic overload of write_add.
template <typename E, typename EV>
inline void write_add(std::atomic<E>* a, EV b) {
  // volatile E newV, oldV;
  E newV, oldV;
  do {
    oldV = a->load();
    newV = oldV + b;
  } while (!std::atomic_compare_exchange_strong(a, &oldV, newV));
}

// Atomically sets *a = min(*a, b) under `less`; returns true iff b was
// written (i.e. b was strictly smaller than the value it replaced).
template <typename ET, typename F>
inline bool write_min(ET* a, ET b, F less) {
  ET c;
  bool r = 0;
  do
    c = *a;
  while (less(b, c) && !(r = atomic_compare_and_swap(a, c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_min(volatile ET* a, ET b, F less) {
  ET c;
  bool r = 0;
  do
    c = *a;
  while (less(b, c) && !(r = atomic_compare_and_swap(a, c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_min(std::atomic<ET>* a, ET b, F less) {
  ET c;
  bool r = 0;
  do
    c = a->load();
  while (less(b, c) && !(r = std::atomic_compare_exchange_strong(a, &c, b)));
  return r;
}

// Atomically sets *a = max(*a, b) under `less`; returns true iff b was
// written.
template <typename ET, typename F>
inline bool write_max(ET* a, ET b, F less) {
  ET c;
  bool r = 0;
  do
    c = *a;
  while (less(c, b) && !(r = atomic_compare_and_swap(a, c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_max(volatile ET* a, ET b, F less) {
  ET c;
  bool r = 0;
  do
    c = *a;
  while (less(c, b) && !(r = atomic_compare_and_swap(a, c, b)));
  return r;
}

template <typename ET, typename F>
inline bool write_max(std::atomic<ET>* a, ET b, F less) {
  ET c;
  bool r = 0;
  do
    c = a->load();
  while (less(c, b) && !(r = std::atomic_compare_exchange_strong(a, &c, b)));
  return r;
}

// Legacy spelling of atomic_compare_and_swap.
template <typename ET>
inline bool CAS(ET* ptr, const ET oldv, const ET newv) {
  return atomic_compare_and_swap(ptr, oldv, newv);
}

// Convenience overloads defaulting to std::less ordering.
template <typename ET>
inline bool write_min(ET* a, ET b) {
  return write_min<ET>(a, b, std::less<ET>());
}

template <typename ET>
inline bool write_max(ET* a, ET b) {
  return write_max<ET>(a, b, std::less<ET>());
}

// Combines two hash values.
inline uint64_t hash_combine(uint64_t hash_value_1, uint64_t hash_value_2) {
  // This is the same as boost's 32-bit `hash_combine` implementation, but with
  // 2 ^ 64 / (golden ratio) chosen as an arbitrary 64-bit additive magic number
  // rather than 2 ^ 32 / (golden ratio).
  return hash_value_1 ^
         (hash_value_2 + 0x9e3779b97f4a7c15 + (hash_value_1 << 6) +
          (hash_value_1 >> 2));
}

// Adapts an iterator I so that only elements satisfying predicate P are
// produced; E is the element type.  On construction, advances to the first
// passing element.
template <class E, class I, class P>
struct filter_iter {
  I& iter;
  P& pred;
  E cur_val;

  filter_iter(I& _it, P& _pr) : iter(_it), pred(_pr) {
    cur_val = iter.cur();
    while (!pred(cur_val) && iter.has_next()) {
      cur_val = iter.next();
    }
  }

  E cur() { return cur_val; }

  E next() {
    while (iter.has_next()) {
      cur_val = iter.next();
      if (pred(cur_val)) {
        break;
      }
    }
    return cur_val;
  }

  // has_next
};

template <class E, class I, class P>
inline filter_iter<E, I, P> make_filter_iter(I& _it, P& _pr) {
  return filter_iter<E, I, P>(_it, _pr);
}

}  // namespace gbbs

namespace parlay {
// Reduction under the max monoid.
template <class Seq>
inline auto reduce_max(Seq const& I) -> typename Seq::value_type {
  using T = typename Seq::value_type;
  return reduce(make_slice(I), maxm<T>());
}

// Reduction under the min monoid.
template <class Seq>
inline auto reduce_min(Seq const& I) -> typename Seq::value_type {
  using T = typename Seq::value_type;
  return reduce(make_slice(I), minm<T>());
}

// Reduction under the xor monoid.
template <class Seq>
inline auto reduce_xor(Seq const& I) -> typename Seq::value_type {
  using T = typename Seq::value_type;
  return reduce(make_slice(I), xorm<T>());
}

using parlay::internal::sample_sort;
using parlay::internal::sample_sort_inplace;
using parlay::internal::pack_out;
using parlay::internal::filter_out;
using parlay::internal::split_two;

constexpr const size_t _log_block_size = 10;
constexpr const size_t _block_size = (1 << _log_block_size);

// Number of block_size-sized chunks needed to cover n elements.
inline size_t num_blocks(size_t n, size_t block_size) {
  if (n == 0)
    return 0;
  else
    return (1 + ((n)-1) / (block_size));
}

// Writes the list of indices `i` where `Fl[i] == true` to range `Out`.
template <class Bool_Seq, class Out_Seq>
size_t pack_index_out(Bool_Seq const& Fl, Out_Seq&& Out, flags fl = no_flag) {
  using Idx_Type = typename std::remove_reference<Out_Seq>::type::value_type;
  auto identity = [](size_t i) { return (Idx_Type)i; };
  return pack_out(delayed_seq<Idx_Type>(Fl.size(), identity), Fl,
                  std::forward<Out_Seq>(Out), fl);
}

using parlay::internal::binary_search;

constexpr size_t _F_BSIZE = 2000;

// Transforms input sequence `[a_0, a_1, ..., a_{n-1}]` to sequence `[f(0, a_0),
// f(1, a_1), ..., f(n-1, a_{n-1})]` using input function `f`.
//
// Arguments:
//   A: sequence-like object with elements of type `T`
//     Input array.
//   f: (size_t, T) -> OT
//     Function to apply to input array.
//
// Returns:
//   sequence<OT>
//     Result of applying `f` to each element of `A` along with the index of
//     that element in `A`.
template <class OT, class Seq, class Func>
auto map_with_index(Seq const& A, Func&& f, flags fl = no_flag) -> sequence<OT> {
  return sequence<OT>::from_function(A.size(),
                                     [&](size_t i) { return f(i, A[i]); });
}

// Parallel map: applies f to each element of A, producing a sequence<OT>.
template <class OT, class Seq, class UnaryFunc>
auto map(Seq const& A, UnaryFunc f, flags fl = no_flag) -> sequence<OT> {
  return sequence<OT>::from_function(A.size(),
                                     [&](size_t i) { return f(A[i]); });
}

// Parallel filter where the predicate also receives the element's index:
// keeps In[j] iff f(In[j], j).  Implemented as blocked count + scan + pack.
template <class In_Seq, class F>
auto filter_index(In_Seq const& In, F f, flags fl = no_flag)
    -> sequence<typename In_Seq::value_type> {
  using T = typename In_Seq::value_type;
  size_t n = In.size();
  size_t l = num_blocks(n, _block_size);
  sequence<size_t> Sums(l);
  sequence<bool> Fl(n);
  // Pass 1: per-block survivor counts into Sums, flags into Fl.
  parlay::internal::sliced_for(n, _block_size,
                               [&](size_t i, size_t s, size_t e) {
                                 size_t r = 0;
                                 for (size_t j = s; j < e; j++)
                                   r += (Fl[j] = f(In[j], j));
                                 Sums[i] = r;
                               });
  // Exclusive scan turns counts into output offsets; m = total survivors.
  size_t m = parlay::scan_inplace(make_slice(Sums));
  sequence<T> Out = sequence<T>::uninitialized(m);
  // Pass 2: each block packs its survivors into its slot of Out.
  parlay::internal::sliced_for(n, _block_size,
                               [&](size_t i, size_t s, size_t e) {
                                 parlay::internal::pack_serial_at(
                                     make_slice(In).cut(s, e),
                                     make_slice(Fl).cut(s, e),
                                     make_slice(Out).cut(
                                         Sums[i], (i == l - 1) ? m : Sums[i + 1]));
                               });
  return Out;
}

// Given a random-access mapping f where f[i] is a (keep, value) tuple,
// returns the (index, value) pairs for all kept positions.
template <class Idx_Type, class D, class F>
inline sequence<std::tuple<Idx_Type, D> > pack_index_and_data(F& f,
                                                              size_t size) {
  auto id_seq =
      parlay::delayed_seq<std::tuple<Idx_Type, D> >(size, [&](size_t i) {
        return std::make_tuple((Idx_Type)i, std::get<1>(f[i]));
      });
  auto flgs_seq = parlay::delayed_seq<bool>(
      size, [&](size_t i) { return std::get<0>(f[i]); });
  return parlay::pack(id_seq, flgs_seq);
}

// Exact k-th smallest element (0-indexed) by randomized quickselect:
// recursively partitions around a random pivot.
template <class Seq, class Compare>
typename Seq::value_type kth_smallest(Seq const& s, size_t k, Compare less,
                                      random r = random()) {
  using T = typename Seq::value_type;
  size_t n = s.size();
  T pivot = s[r[0] % n];
  sequence<T> smaller = filter(s, [&](T a) { return less(a, pivot); });
  if (k < smaller.size())
    return kth_smallest(smaller, k, less, r.next());
  else {
    sequence<T> larger = filter(s, [&](T a) { return less(pivot, a); });
    if (k >= n - larger.size())
      return kth_smallest(larger, k - n + larger.size(), less, r.next());
    else
      return pivot;
  }
}

// Approximate k-th smallest: samples ~sqrt(n) elements, sorts the sample,
// and returns the proportionally-ranked sample element.
template <class Seq, class Compare>
typename Seq::value_type approximate_kth_smallest(Seq const& S, size_t k,
                                                  Compare less,
                                                  random r = random()) {
  // raise exception if empty sequence?
  using T = typename Seq::value_type;
  size_t n = S.size();
  size_t num_samples = n / sqrt(n);
  sequence<T> samples = sequence<T>::from_function(
      num_samples, [&](size_t i) -> T { return S[r[i] % n]; });
  return sample_sort(make_slice(samples), less)[k * num_samples / n];
}

// Sequential filter of n elements from `in` into `out`; returns the count
// kept.  `in` and `out` may alias front-to-back (out <= in).
template <class T, class Pred>
inline size_t filter_seq(T* in, T* out, size_t n, Pred p) {
  size_t k = 0;
  for (size_t i = 0; i < n; i++)
    if (p(in[i])) out[k++] = in[i];
  return k;
}

// Faster for a small number in output (about 40% or less)
// Destroys the input. Does not need a bool array.
template <class T, class PRED> inline size_t filterf(T* In, T* Out, size_t n, PRED p) { size_t b = _F_BSIZE; if (n < b) return filter_seq(In, Out, n, p); size_t l = num_blocks(n, b); auto Sums = sequence<size_t>::uninitialized(l + 1); parallel_for(0, l, [&](size_t i) { size_t s = i * b; size_t e = std::min(s + b, n); size_t k = s; for (size_t j = s; j < e; j++) { if (p(In[j])) In[k++] = In[j]; } Sums[i] = k - s; }, 1); Sums[l] = 0; size_t m = parlay::scan_inplace(make_slice(Sums)); Sums[l] = m; parallel_for(0, l, [&](size_t i) { T* I = In + i * b; T* O = Out + Sums[i]; for (size_t j = 0; j < Sums[i + 1] - Sums[i]; j++) { O[j] = I[j]; } }, 1); return m; } // Faster for a small number in output (about 40% or less) // Destroys the input. Does not need a bool array. template <class T, class PRED, class OUT> inline size_t filterf(T* In, size_t n, PRED p, OUT out, size_t out_off) { size_t b = _F_BSIZE; if (n < b) { size_t k = out_off; for (size_t i = 0; i < n; i++) { if (p(In[i])) out(k++, In[i]); } return k - out_off; } size_t l = num_blocks(n, b); auto Sums = sequence<size_t>::uninitialized(l + 1); parallel_for(0, l, [&](size_t i) { size_t s = i * b; size_t e = std::min(s + b, n); size_t k = s; for (size_t j = s; j < e; j++) { if (p(In[j])) In[k++] = In[j]; } Sums[i] = k - s; }, 1); Sums[l] = 0; size_t m = parlay::scan_inplace(make_slice(Sums)); Sums[l] = m; parallel_for(0, l, [&](size_t i) { T* I = In + i * b; size_t si = out_off + Sums[i]; for (size_t j = 0; j < Sums[i + 1] - Sums[i]; j++) { out(si + j, I[j]); } }, 1); return m; } // String utilities inline int t_to_stringlen(long a) { return 21; } inline void type_to_string(char* s, long a) { sprintf(s, "%ld", a); } inline int t_to_stringlen(unsigned long a) { return 21; } inline void type_to_string(char* s, unsigned long a) { sprintf(s, "%lu", a); } inline uint t_to_stringlen(uint a) { return 12; } inline void type_to_string(char* s, uint a) { sprintf(s, "%u", a); } inline int t_to_stringlen(int a) { return 12; } 
inline void type_to_string(char* s, int a) { sprintf(s, "%d", a); }

inline int t_to_stringlen(double a) { return 18; }

inline int t_to_stringlen(char* a) { return strlen(a) + 1; }
inline void type_to_string(char* s, char* a) { sprintf(s, "%s", a); }

inline void type_to_string(char* s, double a) { sprintf(s, "%.11le", a); }

// Pair/tuple overloads: components are space-separated, so the bound is the
// sum of the component bounds plus one separator per boundary.
template <class A, class B>
inline int t_to_stringlen(std::pair<A, B> a) {
  return t_to_stringlen(a.first) + t_to_stringlen(a.second) + 1;
}

template <class A, class B>
inline int t_to_stringlen(std::tuple<A, B> a) {
  return t_to_stringlen(std::get<0>(a)) + t_to_stringlen(std::get<1>(a)) + 1;
}

template <class A, class B, class C>
inline int t_to_stringlen(std::tuple<A, B, C> a) {
  return t_to_stringlen(std::get<0>(a)) + t_to_stringlen(std::get<1>(a)) +
         t_to_stringlen(std::get<2>(a)) + 2;
}

template <class A, class B>
inline void type_to_string(char* s, std::pair<A, B> a) {
  int l = t_to_stringlen(a.first);
  type_to_string(s, a.first);
  s[l] = ' ';
  type_to_string(s + l + 1, a.second);
}

template <class A, class B>
inline void type_to_string(char* s, std::tuple<A, B> a) {
  int l = t_to_stringlen(std::get<0>(a));
  type_to_string(s, std::get<0>(a));
  s[l] = ' ';
  type_to_string(s + l + 1, std::get<1>(a));
}

template <class A, class B, class C>
inline void type_to_string(char* s, std::tuple<A, B, C> a) {
  int l = t_to_stringlen(std::get<0>(a));
  type_to_string(s, std::get<0>(a));
  s[l] = ' ';
  int l1 = t_to_stringlen(std::get<1>(a));
  type_to_string(s + l + 1, std::get<1>(a));
  s[l + l1 + 1] = ' ';
  type_to_string(s + l + l1 + 2, std::get<2>(a));
}

// Renders each element of T on its own '\n'-terminated line into one char
// sequence (NUL padding from the over-estimated lengths is filtered out).
// NOTE(review): assumes T is non-empty — with n == 0, `n - 1` underflows the
// parallel_for bound and S[n - 1] is out of range; confirm callers guarantee
// this.
template <class TSeq>
sequence<char> sequence_to_string(TSeq const& T) {
  size_t n = T.size();
  auto S = sequence<size_t>::from_function(n, [&](size_t i) {
    return t_to_stringlen(T[i]) + 1;  // +1 for \n
  });
  size_t m = parlay::scan_inplace(make_slice(S), addm<size_t>());

  auto C = sequence<char>::from_function(m, [&](size_t i) { return (char)0; });
  parallel_for(0, n - 1, [&](size_t i) {
    type_to_string(C.begin() + S[i], T[i]);
    C[S[i + 1] - 1] = '\n';
  });
  // Last element handled outside the loop since it has no successor offset.
  type_to_string(C.begin() + S[n - 1], T[n - 1]);
  C[m - 1] = '\n';

  return parlay::filter(make_slice(C), [&](char A) { return A > 0; });
}

using parlay::internal::chars_to_int_t;
using parlay::internal::get_counts;

}  // namespace parlay
relic_multi.h
/*
 * RELIC is an Efficient LIbrary for Cryptography
 * Copyright (c) 2020 RELIC Authors
 *
 * This file is part of RELIC. RELIC is legal property of its developers,
 * whose names are not listed here. Please refer to the COPYRIGHT file
 * for contact information.
 *
 * RELIC is free software; you can redistribute it and/or modify it under the
 * terms of the version 2.1 (or later) of the GNU Lesser General Public License
 * as published by the Free Software Foundation; or version 2.0 of the Apache
 * License as published by the Apache Software Foundation. See the LICENSE files
 * for more details.
 *
 * RELIC is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the LICENSE files for more details.
 *
 * You should have received a copy of the GNU Lesser General Public or the
 * Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/>
 * or <https://www.apache.org/licenses/>.
 */

/**
 * @defgroup relic Core functions
 */

/**
 * @file
 *
 * Multithreading support.
 *
 * @ingroup relic
 */

#ifndef RLC_MULTI_H
#define RLC_MULTI_H

#if defined(MULTI)
#include <math.h>
#if MULTI == OPENMP
#include <omp.h>
#elif MULTI == PTHREAD
#include <pthread.h>
#endif /* OPENMP */
#endif /* MULTI */

/*============================================================================*/
/* Constant definitions                                                       */
/*============================================================================*/

#if defined(MULTI)

/**
 * If multi-threading is enabled, assigns each thread a local copy of the data.
 */
#if MULTI == PTHREAD
/* Fix: __declspec(thread) is the MSVC-only spelling of thread-local storage,
 * so the previous unconditional definition failed to compile pthread builds
 * on GCC/Clang.  Select the spelling that matches the compiler. */
#ifdef _MSC_VER
#define rlc_thread __declspec(thread)
#else
#define rlc_thread __thread
#endif
#else
/* Under OpenMP, per-thread copies are handled via threadprivate below. */
#define rlc_thread /* */
#endif

/**
 * Make library context private to each thread.
 */
#if MULTI == OPENMP

/**
 * Active library context, only visible inside the library.
 */
extern ctx_t first_ctx;

/**
 * Pointer to active library context, only visible inside the library.
 */
extern ctx_t *core_ctx;

#pragma omp threadprivate(first_ctx, core_ctx)

#endif

#endif /* MULTI */

#endif /* !RLC_MULTI_H */
salted_sha1_fmt_plug.c
/*
 * generic salted-sha1 support for LDAP style password storage
 *
 * Copyright (c) 2003 Simon Marechal, salt length fixes (c) 2012 magnum
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_saltedsha;
#elif FMT_REGISTERS_H
john_register_one(&fmt_saltedsha);
#else

#include <string.h>

#include "misc.h"
#include "formats.h"
#include "arch.h"
#include "options.h"
#include "johnswap.h"
#include "salted_sha1_common.h"

#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#include "simd-intrinsics.h"

#include "common.h"
#include "sha.h"
#include "base64_convert.h"

#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif

#include "memdbg.h"

#define FORMAT_LABEL "Salted-SHA1"
#define FORMAT_NAME ""

#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0

#define PLAINTEXT_LENGTH (55-MAX_SALT_LEN)

#define BINARY_ALIGN 4
#define SALT_SIZE (MAX_SALT_LEN + sizeof(unsigned int))
#define SALT_ALIGN 4

#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
/* Maps (byte i of key `index`) to its interleaved, big-endian position in
 * the SIMD key buffer. */
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 ) //for endianity conversion
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

/* Salt as stored by get_salt: length plus raw bytes (w32 aliases the first
 * word for salt_hash). */
struct s_salt {
	unsigned int len;
	union {
		unsigned char c[MAX_SALT_LEN+1];
		ARCH_WORD_32 w32;
	} data;
};

static struct s_salt *saved_salt;

#ifdef SIMD_COEF_32
static ARCH_WORD_32 (*saved_key)[SHA_BUF_SIZ*NBKEYS];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/4*NBKEYS];
static unsigned int *saved_len;
static unsigned char out[PLAINTEXT_LENGTH + 1];
static int last_salt_size;
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
#endif

/* Scales keys-per-crypt for OpenMP and allocates the key/crypt buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_32
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
#else
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS, sizeof(*crypt_key), MEM_ALIGN_SIMD);
#endif
}

/* Releases the buffers allocated by init. */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
#ifdef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
}

/* Decodes the base64 hash portion of the ciphertext into raw binary
 * (endianity-adjusted for the SIMD layout). */
static void * get_binary(char *ciphertext)
{
	static char *realcipher;

	if (!realcipher)
		realcipher = mem_alloc_tiny(BINARY_SIZE + MAX_SALT_LEN + 4, MEM_ALIGN_WORD);

	ciphertext += NSLDAP_MAGIC_LENGTH;
	memset(realcipher, 0, BINARY_SIZE);
	base64_convert(ciphertext, e_b64_mime, strlen(ciphertext), realcipher, e_b64_raw, BINARY_SIZE+MAX_SALT_LEN, 0, 0);
#ifdef SIMD_COEF_32
	alter_endianity((unsigned char *)realcipher, BINARY_SIZE);
#endif
	return (void *)realcipher;
}

/* Stores a candidate key.  SIMD path copies it word-by-word into the
 * interleaved buffer, byte-swapping and appending the 0x80 SHA-1 padding
 * marker as soon as the terminating NUL is seen. */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
#if ARCH_ALLOWS_UNALIGNED
	const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
#else
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
	const ARCH_WORD_32 *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ?
	                                       key : strcpy(buf_aligned, key));
#endif
	ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32];
	ARCH_WORD_32 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_32 temp;

	len = 0;
	while((unsigned char)(temp = *wkey++)) {
		/* A zero byte inside the word marks end-of-key: merge the 0x80
		 * padding byte into the same word and stop. */
		if (!(temp & 0xff00))
		{
			*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000))
		{
			*keybuf_word = JOHNSWAP(temp | (0x80U << 24));
			len+=3;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP(temp);
		len += 4;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = 0x80000000;

key_cleaning:
	/* Zero any stale words left from a previous, longer key. */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	saved_len[index] = len;
#else
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
#endif
}

/* Extracts the salt bytes that follow the 20-byte digest in the decoded
 * ciphertext. */
static void * get_salt(char * ciphertext)
{
	static struct s_salt cursalt;
	char realcipher[BINARY_SIZE + MAX_SALT_LEN + 4];
	int len;

	ciphertext += NSLDAP_MAGIC_LENGTH;
	memset(realcipher, 0, sizeof(realcipher));
	memset(&cursalt, 0, sizeof(struct s_salt));
	len = strlen(ciphertext);
	cursalt.len = base64_convert(ciphertext, e_b64_mime, len, realcipher, e_b64_raw, BINARY_SIZE+MAX_SALT_LEN, 0, 0) - BINARY_SIZE;

	memcpy(cursalt.data.c, realcipher+BINARY_SIZE, cursalt.len);
	return &cursalt;
}

/* Reconstructs the plaintext key from the (interleaved) key buffer. */
static char *get_key(int index) {
#ifdef SIMD_COEF_32
	unsigned int i,s;

	s = saved_len[index];
	for(i=0;i<s;i++)
		out[i] = ((char*)saved_key)[GETPOS(i, index)];
	out[i] = 0;
	return (char *) out;
#else
	return saved_key[index];
#endif
}

/* Cheap first-stage check: does any computed hash's first word match? */
static int cmp_all(void *binary, int count) {
	unsigned int index;

	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_32
		if (((ARCH_WORD_32 *) binary)[0] == ((ARCH_WORD_32*)crypt_key)[(index&(SIMD_COEF_32-1)) + index/SIMD_COEF_32*5*SIMD_COEF_32])
#else
		if ( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)&(crypt_key[index][0]))[0] )
#endif
			return 1;
	return 0;
}

/* Full comparison already done by cmp_one; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return (1);
}

/* Full-digest comparison against candidate `index`. */
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
	int i;
	for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_32); i++)
		if (((ARCH_WORD_32 *) binary)[i] != ((ARCH_WORD_32*)crypt_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32+i*SIMD_COEF_32])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}

static void set_salt(void *salt) {
	saved_salt = salt;
}

#ifdef SIMD_COEF_32
/* Appends the current salt (plus 0x80 padding and bit-length word) after
 * key `index` in the SIMD buffer, clearing residue up to the previous
 * salt's length. */
static inline void set_onesalt(int index)
{
	unsigned int i, idx=index%NBKEYS;
	unsigned char *sk = (unsigned char*)&saved_key[index/NBKEYS];

	for(i=0;i<saved_salt->len;++i)
		sk[GETPOS(i+saved_len[index], idx)] = saved_salt->data.c[i];
	sk[GETPOS(i+saved_len[index], idx)] = 0x80;

	while (++i <= last_salt_size)
		sk[GETPOS(i+saved_len[index], idx)] = 0;

	/* Word 15 of the SHA-1 block holds the message length in bits. */
	((unsigned int*)sk)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + idx/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = (saved_salt->len + saved_len[index])<<3;
}
#endif

/* Computes SHA1(key . salt) for all `count` candidates; parallelized over
 * SIMD batches with OpenMP when available. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#ifdef SIMD_COEF_32
	int inc = NBKEYS;
#else
	int inc = 1;
#endif

#pragma omp parallel for
	for (index=0; index < count; index += inc)
#endif
	{
#ifdef SIMD_COEF_32
		unsigned int i;

		for(i=0;i<NBKEYS;i++)
			set_onesalt(i+index);
		SIMDSHA1body(saved_key[index/NBKEYS], crypt_key[index/NBKEYS], NULL, SSEi_MIXED_IN);
#else
		SHA_CTX ctx;
		SHA1_Init( &ctx );
		SHA1_Update( &ctx, (unsigned char *) saved_key[index], strlen( saved_key[index] ) );
		SHA1_Update( &ctx, (unsigned char *) saved_salt->data.c, saved_salt->len);
		SHA1_Final( (unsigned char *)crypt_key[index], &ctx);
#endif
	}
#ifdef SIMD_COEF_32
	/* Remember how many salt bytes to clear next time the salt shrinks. */
	last_salt_size = saved_salt->len;
#endif
	return count;
}

#ifdef SIMD_COEF_32
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+(((unsigned int)index%NBKEYS)/SIMD_COEF_32)*SIMD_COEF_32*5
static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif

/* Buckets salts by the first word of the raw salt bytes. */
static int salt_hash(void *salt)
{
	struct s_salt * mysalt = salt;
	return mysalt->data.w32 & (SALT_HASH_SIZE - 1);
}

struct fmt_main fmt_saltedsha = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		{ NSLDAP_MAGIC },
		salted_sha1_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		salted_sha1_common_valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
simd-4.c
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

/* Test of user-defined reductions (#pragma omp declare reduction) on a
   '#pragma omp simd' loop: a builtin-named reduction (+) on a struct, and a
   custom reduction identifier (foo) on both a struct and a plain int.  */

extern void abort ();

/* 32-byte aligned so the 'aligned(a : 32)' clause below is valid.  */
int a[1024] __attribute__((aligned (32))) = { 1 };

struct S
{
  int s;
};

/* All three combiners are plain sums over the member / scalar, so every
   reduction below must produce the same total.  */
#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:int:omp_out += omp_in)

/* noinline/noclone keep the compiler from folding the loop into main.  */
__attribute__((noinline, noclone)) int
foo (void)
{
  int i, u = 0;
  struct S s, t;
  s.s = 0;
  t.s = 0;
  /* 's' uses the UDR named '+', 't' and 'u' use the UDR named 'foo'; all
     three accumulate the same values, so they must agree afterwards.  */
  #pragma omp simd aligned(a : 32) reduction(+:s) reduction(foo:t, u)
  for (i = 0; i < 1024; i++)
    {
      int x = a[i];
      s.s += x;
      t.s += x;
      u += x;
    }
  if (t.s != s.s || u != s.s)
    abort ();
  return s.s;
}

int
main ()
{
  int i;
  for (i = 0; i < 1024; i++)
    a[i] = (i & 31) + (i / 128);
  /* Expected sum: sum of (i & 31) over 1024 iterations is 32*496 = 15872,
     sum of (i / 128) is 128*(0+1+...+7) = 3584; total 19456.  */
  int s = foo ();
  if (s != 19456)
    abort ();
  return 0;
}
DRB007-indirectaccess3-orig-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two pointers have distance of 12 (p1 - p2 = 12). 
They are used as base addresses for indirect array accesses using an index set (another array). An index set has two indices with distance of 12 : indexSet[3]- indexSet[0] = 533 - 521 = 12 So there is loop carried dependence for N=0 and N=3. We use the default loop scheduling (static even) in OpenMP. It is possible that two dependent iterations will be scheduled within a same chunk to a same thread. So there is no runtime data races. N is 180, two iteraions with N=0 and N= 3 have loop carried dependences. For static even scheduling, we must have at least 60 threads (18060=3 iterations) so iteration 0 and 3 will be scheduled to two different threads. Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5 */ #include <assert.h> #include <stdio.h> #include <stdlib.h> /* 521+12=533 */ int indexSet[180] = {521, 523, 525, 533, 529, 531, 547, 549, 551, 553, 555, 557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 921, 937, 939, 941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233, 1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013}; int main(int argc, char * argv[]) { double * base = (double * )malloc(sizeof (double)*((2013+12)+1)); double * xa1 = base; double * xa2 = xa1+12; int i; int _ret_val_0; if (base==0) 
{ printf("Error in malloc(). Aborting ...\n"); _ret_val_0=1; return _ret_val_0; } /* initialize segments touched by indexSet */ #pragma loop name main#0 #pragma cetus parallel #pragma omp parallel for for (i=521; i<=2025; ++ i) { base[i]=(0.5*i); } #pragma loop name main#1 for (i=0; i<180; ++ i) { int idx = indexSet[i]; xa1[idx]+=1.0; xa2[idx]+=3.0; } printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]); free(base); _ret_val_0=0; return _ret_val_0; }
threading_std.h
#include <cassert> #include <cstddef> #include <future> #include <type_traits> #include <vector> #include "thread_count.h" #ifndef THREADING_STD_LAUNCH #define THREADING_STD_LAUNCH async // async or deferred #endif namespace threading_common { class split {}; class auto_partitioner {}; // class static_partitioner; // class affinity_partitioner; //! A range over which to iterate. template <typename Value> class blocked_range { public: //! Type of a value /** Called a const_iterator for sake of algorithms that need to treat a blocked_range as an STL container. */ using const_iterator = Value; //! Type for size of a range using size_type = std::size_t; //! Construct range over half-open interval [begin,end), with the given grainsize. blocked_range(Value begin_, Value end_ /*TODO , size_type grainsize_=1*/) : my_end(end_) , my_begin(begin_) //, my_grainsize(grainsize_) { // assert( my_grainsize>0 && "grainsize must be positive" ); } //! Beginning of range. const_iterator begin() const { return my_begin; } //! One past last value in range. const_iterator end() const { return my_end; } //! Size of the range /** Unspecified if end()<begin(). */ size_type size() const { assert(!(end() < begin()) && "size() unspecified if end()<begin()"); return size_type(my_end - my_begin); } //! The grain size for this range. size_type grainsize() const { return 1 /*my_grainsize*/; } //------------------------------------------------------------------------ // Methods that implement Range concept //------------------------------------------------------------------------ //! True if range is empty. bool empty() const { return !(my_begin < my_end); } //! True if range is divisible. /** Unspecified if end()<begin(). */ bool is_divisible() const { return /*TODO my_grainsize<*/ size(); } //! Split range. /** The new Range *this has the second part, the old range r has the first part. Unspecified if end()<begin() or !is_divisible(). 
*/ blocked_range(blocked_range& r, split) : my_end(r.my_end) , my_begin(do_split(r, split())) // TODO , my_grainsize(r.my_grainsize) { // only comparison 'less than' is required from values of blocked_range objects assert(!(my_begin < r.my_end) && !(r.my_end < my_begin) && "blocked_range has been split incorrectly"); } private: /** NOTE: my_end MUST be declared before my_begin, otherwise the splitting constructor * will break. */ Value my_end; Value my_begin; // TODO size_type my_grainsize; //! Auxiliary function used by the splitting constructor. static Value do_split(blocked_range& r, split) { assert(r.is_divisible() && "cannot split blocked_range that is not divisible"); Value middle = r.my_begin + (r.my_end - r.my_begin) / 2u; r.my_end = middle; return middle; } }; } // namespace threading_common namespace threading_std { using std::future; using namespace threading_common; constexpr auto launch = std::launch::THREADING_STD_LAUNCH; template <typename Fn, typename... Args, typename Result = std::result_of_t<Fn && (Args && ...)>> future<Result> async(Fn&& fn, Args&&... args) { return std::async(launch, std::forward<Fn>(fn), std::forward<Args>(args)...); } class task_group { std::vector<future<void>> threads_; public: template <typename F> void run(F&& f) { threads_.emplace_back(async(std::forward<F>(f))); } void cancel() { /*not implemented*/ } void wait() { // TODO task_group_status ? for (auto& child : this->threads_) { child.wait(); } } }; // class task_group //! Parallel iteration over range with default partitioner. 
/** @ingroup algorithms **/ // template<typename Range, typename Body, typename Partitioner = auto_partitioner> // void parallel_for( const Range& range, const Body& body, const Partitioner &p = // Partitioner()); template <typename Int, typename Body, typename Partitioner = auto_partitioner> void parallel_for(const blocked_range<Int>& range, const Body& body, const Partitioner& p = Partitioner()) { const Int worker_count = cpu_threads(); std::vector<std::future<void>> worker_threads; worker_threads.reserve(worker_count); for (Int i = 0, start_entry = range.begin(), stop_entry = range.end(), stride = (range.size() + worker_count - 1) / worker_count; i < worker_count && start_entry < stop_entry; ++i, start_entry += stride) { const auto end_entry = std::min(start_entry + stride, stop_entry); // TODO grainsize? worker_threads.emplace_back( std::async(launch, body, blocked_range<Int>(start_entry, end_entry))); } for (auto& child : worker_threads) { child.wait(); } } //! Parallel iteration over a range of integers with a default step value and default //! partitioner template <typename Index, typename Function, typename Partitioner = auto_partitioner> void parallel_for(Index first, Index last, const Function& f, const Partitioner& p = Partitioner()) { parallel_for( blocked_range<Index>(first, last), [&f](const blocked_range<Index>& r) { //#pragma ivdep //#pragma omp simd for (auto i = r.begin(), e = r.end(); i < e; i++) { f(i); } }, p); } //! 
Parallel iteration with reduction /** @ingroup algorithms **/ template <typename Int, typename Value, typename RealBody, typename Reduction, typename Partitioner = auto_partitioner> Value parallel_reduce(const blocked_range<Int>& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const Partitioner& p = Partitioner()) { const size_t worker_count = cpu_threads(); std::vector<std::future<Value>> worker_threads; worker_threads.reserve(worker_count); for (Int i = 0, start_entry = range.begin(), stop_entry = range.end(), stride = (range.size() + worker_count - 1) / worker_count; i < worker_count && start_entry < stop_entry; ++i, start_entry += stride) { const auto end_entry = std::min(start_entry + stride, stop_entry); // TODO grainsize? worker_threads.emplace_back(std::async( launch, real_body, blocked_range<Int>(start_entry, end_entry), Value{})); } Value v = identity; for (auto& child : worker_threads) { v = reduction(v, child.get()); } return v; } } // namespace threading_std
axmy.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void FUNC(axmy)(const dlong & N, const dfloat& alpha, const dfloat * __restrict__ cpu_w, dfloat * __restrict__ cpu_a){ #ifdef __NEKRS__OMP__ #pragma omp parallel for #endif for(int i=0;i<N;++i){ const dfloat ai = cpu_a[i]; const dfloat wi = cpu_w[i]; cpu_a[i] = alpha*ai*wi; } }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
9.norace3.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 100 int main() { int sum = 0; #pragma omp master for (int i = 0; i < N; i++) { sum += i; } return sum; } // We do not support inter SCoP data races for now // CHECK: Region is Data Race Free. // END
val_omp.c
/* This file performs the following test: each OMP thread measures flops for its provided tasks, and compares this to expected flop counts, each thread having been provided with a random amount of work, such that the time and order that they complete their measurements varies. Specifically tested is the case where the value returned for some threads actually corresponds to that for another thread reading its counter values at the same time. - It is based on zero_omp.c but ignored much of its functionality. - It attempts to use the following two counters. It may use less depending on hardware counter resource limitations. These are counted in the default counting domain and default granularity, depending on the platform. Usually this is the user domain (PAPI_DOM_USER) and thread context (PAPI_GRN_THR). + PAPI_FP_INS + PAPI_TOT_CYC Each thread inside the Thread routine: - Do prework (MAX_FLOPS - flops) - Get cyc. - Get us. - Start counters - Do flops - Stop and read counters - Get us. - Get cyc. 
- Return flops */ #include "papi_test.h" #ifdef _OPENMP #include <omp.h> #else #error "This compiler does not understand OPENMP" #endif const int MAX_FLOPS = NUM_FLOPS; extern int TESTS_QUIET; /* Declared in test_utils.c */ const PAPI_hw_info_t *hw_info = NULL; long long Thread( int n ) { int retval, num_tests = 1; int EventSet1 = PAPI_NULL; int PAPI_event, mask1; int num_events1; long long flops; long long **values; long long elapsed_us, elapsed_cyc; char event_name[PAPI_MAX_STR_LEN]; /* printf("Thread(n=%d) 0x%x started\n", n, omp_get_thread_num()); */ num_events1 = 2; /* add PAPI_TOT_CYC and one of the events in PAPI_FP_INS, PAPI_FP_OPS or PAPI_TOT_INS, depending on the availability of the event on the platform */ EventSet1 = add_two_events( &num_events1, &PAPI_event, &mask1 ); retval = PAPI_event_code_to_name( PAPI_event, event_name ); if ( retval != PAPI_OK ) test_fail( __FILE__, __LINE__, "PAPI_event_code_to_name", retval ); values = allocate_test_space( num_tests, num_events1 ); do_flops( MAX_FLOPS - n ); /* prework for balance */ elapsed_us = PAPI_get_real_usec( ); elapsed_cyc = PAPI_get_real_cyc( ); retval = PAPI_start( EventSet1 ); if ( retval != PAPI_OK ) test_fail( __FILE__, __LINE__, "PAPI_start", retval ); do_flops( n ); retval = PAPI_stop( EventSet1, values[0] ); if ( retval != PAPI_OK ) test_fail( __FILE__, __LINE__, "PAPI_stop", retval ); flops = ( values[0] )[0]; elapsed_us = PAPI_get_real_usec( ) - elapsed_us; elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc; remove_test_events( &EventSet1, mask1 ); if ( !TESTS_QUIET ) { /*printf("Thread 0x%x %-12s : \t%lld\t%d\n", omp_get_thread_num(), event_name, (values[0])[0], n); */ #if 0 printf( "Thread 0x%x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num( ), values[0][0] ); printf( "Thread 0x%x Real usec : \t%lld\n", omp_get_thread_num( ), elapsed_us ); printf( "Thread 0x%x Real cycles : \t%lld\n", omp_get_thread_num( ), elapsed_cyc ); #endif } /* It is illegal for the threads to exit in OpenMP */ /* 
test_pass(__FILE__,0,0); */ free_test_space( values, num_tests ); PAPI_unregister_thread( ); /* printf("Thread 0x%x finished\n", omp_get_thread_num()); */ return flops; } int main( int argc, char **argv ) { int tid, retval; int maxthr = omp_get_max_threads( ); int flopper = 0; long long *flops = calloc( maxthr, sizeof ( long long ) ); long long *flopi = calloc( maxthr, sizeof ( long long ) ); tests_quiet( argc, argv ); /* Set TESTS_QUIET variable */ if ( maxthr < 2 ) test_skip( __FILE__, __LINE__, "omp_get_num_threads < 2", PAPI_EINVAL ); if ( ( flops == NULL ) || ( flopi == NULL ) ) test_fail( __FILE__, __LINE__, "calloc", PAPI_ENOMEM ); retval = PAPI_library_init( PAPI_VER_CURRENT ); if ( retval != PAPI_VER_CURRENT ) test_fail( __FILE__, __LINE__, "PAPI_library_init", retval ); hw_info = PAPI_get_hardware_info( ); if ( hw_info == NULL ) test_fail( __FILE__, __LINE__, "PAPI_get_hardware_info", 2 ); retval = PAPI_thread_init( ( unsigned long ( * )( void ) ) ( omp_get_thread_num ) ); if ( retval != PAPI_OK ) if ( retval == PAPI_ESBSTR ) test_skip( __FILE__, __LINE__, "PAPI_thread_init", retval ); else test_fail( __FILE__, __LINE__, "PAPI_thread_init", retval ); flopper = Thread( 65536 ) / 65536; printf( "flopper=%d\n", flopper ); for ( int i = 0; i < 100000; i++ ) #pragma omp parallel private(tid) { tid = omp_get_thread_num( ); flopi[tid] = rand( ) * 3; flops[tid] = Thread( ( flopi[tid] / flopper ) % MAX_FLOPS ); #pragma omp barrier #pragma omp master if ( flops[tid] < flopi[tid] ) { printf( "test iteration=%d\n", i ); for ( int j = 0; j < omp_get_num_threads( ); j++ ) { printf( "Thread 0x%x Value %6lld %c %6lld", j, flops[j], ( flops[j] < flopi[j] ) ? '<' : '=', flopi[j] ); for ( int k = 0; k < omp_get_num_threads( ); k++ ) if ( ( k != j ) && ( flops[k] == flops[j] ) ) printf( " == Thread 0x%x!", k ); printf( "\n" ); } test_fail( __FILE__, __LINE__, "value returned for thread", PAPI_EBUG ); } } test_pass( __FILE__, NULL, 0 ); exit( 0 ); }
red_black_constantcoef_gs.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"
#include "red_black_gs.h"

#ifndef hypre_abs
#define hypre_abs(a) (((a)>0) ? (a) : -(a))
#endif

/*--------------------------------------------------------------------------
 * hypre_RedBlackConstantCoefGS
 *
 * Red/black Gauss-Seidel relaxation for the structured system A x = b,
 * specialized for matrices with constant coefficients
 * (hypre_StructMatrixConstantCoefficient(A) != 0).  Each pass of the
 * while-loop below relaxes one color (red or black), so 2*max_iter
 * half-sweeps make max_iter full iterations (num_iterations = iter/2).
 *
 * relax_vdata : opaque hypre_RedBlackGSData created by the setup routine
 * A, b, x     : matrix, right-hand side, and solution/initial guess
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_RedBlackConstantCoefGS( void               *relax_vdata,
                              hypre_StructMatrix *A,
                              hypre_StructVector *b,
                              hypre_StructVector *x )
{
   hypre_RedBlackGSData  *relax_data = (hypre_RedBlackGSData *)relax_vdata;

   HYPRE_Int              max_iter    = (relax_data -> max_iter);
   HYPRE_Int              zero_guess  = (relax_data -> zero_guess);
   HYPRE_Int              rb_start    = (relax_data -> rb_start);
   HYPRE_Int              diag_rank   = (relax_data -> diag_rank);
   hypre_ComputePkg      *compute_pkg = (relax_data -> compute_pkg);
   HYPRE_Int              ndim        = hypre_StructMatrixNDim(A);

   hypre_CommHandle      *comm_handle;

   hypre_BoxArrayArray   *compute_box_aa;
   hypre_BoxArray        *compute_box_a;
   hypre_Box             *compute_box;

   hypre_Box             *A_dbox;
   hypre_Box             *b_dbox;
   hypre_Box             *x_dbox;

   /* linear data-space ranks and strides for A, b, x */
   HYPRE_Int              Ai, Astart, Ani, Anj;
   HYPRE_Int              bi, bstart, bni, bnj;
   HYPRE_Int              xi, xstart, xni, xnj;
   /* offsets into x for up to 6 off-diagonal stencil entries */
   HYPRE_Int              xoff0, xoff1, xoff2, xoff3, xoff4, xoff5;

   HYPRE_Real            *Ap;
   HYPRE_Real            *App;
   HYPRE_Real            *bp;
   HYPRE_Real            *xp;

   /* constant coefficient */
   /* NOTE(review): presumably 1 = all entries constant; other nonzero values
    * (e.g. 2) leave the diagonal variable — the else branches below re-index
    * Ap per grid point in that case.  Confirm against hypre's struct_matrix. */
   HYPRE_Int              constant_coeff= hypre_StructMatrixConstantCoefficient(A);
   HYPRE_Real             App0, App1, App2, App3, App4, App5, AApd;

   hypre_IndexRef         start;
   hypre_Index            loop_size;

   hypre_StructStencil   *stencil;
   hypre_Index           *stencil_shape;
   HYPRE_Int              stencil_size;

   HYPRE_Int              offd[6];      /* stencil ranks of off-diagonal entries */

   HYPRE_Int              iter, rb, redblack, d;
   HYPRE_Int              compute_i, i, j, ii, jj, kk;
   HYPRE_Int              ni, nj, nk;

   /*----------------------------------------------------------
    * Initialize some things and deal with special cases
    *----------------------------------------------------------*/

   hypre_BeginTiming(relax_data -> time_index);

   /* take new references to A, b, x (dropping any held from a prior call) */
   hypre_StructMatrixDestroy(relax_data -> A);
   hypre_StructVectorDestroy(relax_data -> b);
   hypre_StructVectorDestroy(relax_data -> x);
   (relax_data -> A) = hypre_StructMatrixRef(A);
   (relax_data -> x) = hypre_StructVectorRef(x);
   (relax_data -> b) = hypre_StructVectorRef(b);

   (relax_data -> num_iterations) = 0;

   /* if max_iter is zero, return */
   if (max_iter == 0)
   {
      /* if using a zero initial guess, return zero */
      if (zero_guess)
      {
         hypre_StructVectorSetConstantValues(x, 0.0);
      }

      hypre_EndTiming(relax_data -> time_index);
      return hypre_error_flag;
   }
   else
   {
      stencil       = hypre_StructMatrixStencil(A);
      stencil_shape = hypre_StructStencilShape(stencil);
      stencil_size  = hypre_StructStencilSize(stencil);

      /* get off-diag entry ranks ready */
      i = 0;
      for (j = 0; j < stencil_size; j++)
      {
         if (j != diag_rank)
         {
            offd[i] = j;
            i++;
         }
      }
   }

   hypre_StructVectorClearBoundGhostValues(x, 0);

   /*----------------------------------------------------------
    * Do zero_guess iteration
    *
    * With x == 0 the off-diagonal terms vanish, so the first
    * half-sweep is just x = b / diag and needs no ghost-layer
    * communication (hence no Initialize/FinalizeIndtComputations
    * calls in this section).
    *----------------------------------------------------------*/

   rb = rb_start;
   iter = 0;

   if (zero_guess)
   {
      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         switch(compute_i)
         {
            case 0:
            {
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
            b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
            x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);

            Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
            bp = hypre_StructVectorBoxData(b, i);
            xp = hypre_StructVectorBoxData(x, i);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               start = hypre_BoxIMin(compute_box);
               hypre_BoxGetSize(compute_box, loop_size);

               /* Are we relaxing index start or start+(1,0,0)? */
               /* parity of (rb + sum of start coordinates) picks the color */
               redblack = rb;
               for (d = 0; d < ndim; d++)
               {
                  redblack += hypre_IndexD(start, d);
               }
               redblack = hypre_abs(redblack) % 2;

               bstart = hypre_BoxIndexRank(b_dbox, start);
               xstart = hypre_BoxIndexRank(x_dbox, start);
               ni = hypre_IndexX(loop_size);
               nj = hypre_IndexY(loop_size);
               nk = hypre_IndexZ(loop_size);
               bni = hypre_BoxSizeX(b_dbox);
               xni = hypre_BoxSizeX(x_dbox);
               bnj = hypre_BoxSizeY(b_dbox);
               xnj = hypre_BoxSizeY(x_dbox);
               if (ndim < 3)
               {
                  nk = 1;
                  if (ndim < 2)
                  {
                     nj = 1;
                  }
               }

               if (constant_coeff == 1)
               {
                  /* constant diagonal: one rank, one reciprocal for the box */
                  Ai= hypre_CCBoxIndexRank(A_dbox, start);
                  AApd= 1.0/Ap[Ai];

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                  for (kk = 0; kk < nk; kk++)
                  {
                     for (jj = 0; jj < nj; jj++)
                     {
                        /* first index of this row with the active color,
                         * then stride by 2 to stay on that color */
                        ii = (kk + jj + redblack) % 2;
                        bi = bstart + kk*bnj*bni + jj*bni + ii;
                        xi = xstart + kk*xnj*xni + jj*xni + ii;
                        for (; ii < ni; ii+=2, bi+=2, xi+=2)
                        {
                           xp[xi] = bp[bi]*AApd;
                        }
                     }
                  }
               }

               else /* variable coefficient diag */
               {
                  Astart = hypre_BoxIndexRank(A_dbox, start);
                  Ani = hypre_BoxSizeX(A_dbox);
                  Anj = hypre_BoxSizeY(A_dbox);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                  for (kk = 0; kk < nk; kk++)
                  {
                     for (jj = 0; jj < nj; jj++)
                     {
                        ii = (kk + jj + redblack) % 2;
                        Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                        bi = bstart + kk*bnj*bni + jj*bni + ii;
                        xi = xstart + kk*xnj*xni + jj*xni + ii;
                        for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                        {
                           xp[xi] = bp[bi] / Ap[Ai];
                        }
                     }
                  }
               }
            }
         }
      }

      /* one half-sweep done: switch color */
      rb = (rb + 1) % 2;
      iter++;
   }

   /*----------------------------------------------------------
    * Do regular iterations
    *
    * Each pass of this while-loop is one half-sweep (one color);
    * hence the bound 2*max_iter.
    *----------------------------------------------------------*/

   while (iter < 2*max_iter)
   {
      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         switch(compute_i)
         {
            case 0:
            {
               /* overlap ghost-layer exchange of x with the independent boxes */
               xp = hypre_StructVectorData(x);
               hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle);
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               /* communication must be complete before the dependent boxes */
               hypre_FinalizeIndtComputations(comm_handle);
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
            b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
            x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);

            Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
            bp = hypre_StructVectorBoxData(b, i);
            xp = hypre_StructVectorBoxData(x, i);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               start = hypre_BoxIMin(compute_box);
               hypre_BoxGetSize(compute_box, loop_size);

               /* Are we relaxing index start or start+(1,0,0)? */
               redblack = rb;
               for (d = 0; d < ndim; d++)
               {
                  redblack += hypre_IndexD(start, d);
               }
               redblack = hypre_abs(redblack) % 2;

               bstart = hypre_BoxIndexRank(b_dbox, start);
               xstart = hypre_BoxIndexRank(x_dbox, start);
               ni = hypre_IndexX(loop_size);
               nj = hypre_IndexY(loop_size);
               nk = hypre_IndexZ(loop_size);
               bni= hypre_BoxSizeX(b_dbox);
               xni= hypre_BoxSizeX(x_dbox);
               bnj= hypre_BoxSizeY(b_dbox);
               xnj= hypre_BoxSizeY(x_dbox);
               /* off-diagonal coefficients are constant: rank 0 in data space */
               Ai = hypre_CCBoxIndexRank(A_dbox, start);
               if (ndim < 3)
               {
                  nk = 1;
                  if (ndim < 2)
                  {
                     nj = 1;
                  }
               }

               /* Load the constant off-diagonal coefficients and the x-offsets
                * for this stencil.  Deliberate fall-through: case 7 also runs
                * the case 5 and case 3 bodies, etc.                           */
               switch(stencil_size)
               {
                  case 7:
                     App = hypre_StructMatrixBoxData(A, i, offd[5]);
                     App5= App[Ai];
                     App = hypre_StructMatrixBoxData(A, i, offd[4]);
                     App4= App[Ai];
                     xoff5 = hypre_BoxOffsetDistance(x_dbox,
                                                     stencil_shape[offd[5]]);
                     xoff4 = hypre_BoxOffsetDistance(x_dbox,
                                                     stencil_shape[offd[4]]);
                     /* fall through */

                  case 5:
                     App = hypre_StructMatrixBoxData(A, i, offd[3]);
                     App3= App[Ai];
                     App = hypre_StructMatrixBoxData(A, i, offd[2]);
                     App2= App[Ai];
                     xoff3 = hypre_BoxOffsetDistance(x_dbox,
                                                     stencil_shape[offd[3]]);
                     xoff2 = hypre_BoxOffsetDistance(x_dbox,
                                                     stencil_shape[offd[2]]);
                     /* fall through */

                  case 3:
                     App = hypre_StructMatrixBoxData(A, i, offd[1]);
                     App1= App[Ai];
                     App = hypre_StructMatrixBoxData(A, i, offd[0]);
                     App0= App[Ai];
                     xoff1 = hypre_BoxOffsetDistance(x_dbox,
                                                     stencil_shape[offd[1]]);
                     xoff0 = hypre_BoxOffsetDistance(x_dbox,
                                                     stencil_shape[offd[0]]);
                     break;
               }

               if (constant_coeff == 1)
               {
                  /* diagonal also constant: divide once per box */
                  AApd = 1/Ap[Ai];

                  switch(stencil_size)
                  {
                     case 7:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1] -
                                     App2*xp[xi + xoff2] -
                                     App3*xp[xi + xoff3] -
                                     App4*xp[xi + xoff4] -
                                     App5*xp[xi + xoff5])*AApd;
                              }
                           }
                        }
                        break;

                     case 5:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1] -
                                     App2*xp[xi + xoff2] -
                                     App3*xp[xi + xoff3])*AApd;
                              }
                           }
                        }
                        break;

                     case 3:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1])*AApd;
                              }
                           }
                        }
                        break;
                  }
               }  /* if (constant_coeff == 1) */

               else /* variable diagonal */
               {
                  Astart = hypre_BoxIndexRank(A_dbox, start);
                  Ani = hypre_BoxSizeX(A_dbox);
                  Anj = hypre_BoxSizeY(A_dbox);

                  switch(stencil_size)
                  {
                     case 7:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1] -
                                     App2*xp[xi + xoff2] -
                                     App3*xp[xi + xoff3] -
                                     App4*xp[xi + xoff4] -
                                     App5*xp[xi + xoff5]) / Ap[Ai];
                              }
                           }
                        }
                        break;

                     case 5:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1] -
                                     App2*xp[xi + xoff2] -
                                     App3*xp[xi + xoff3]) / Ap[Ai];
                              }
                           }
                        }
                        break;

                     case 3:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1]) / Ap[Ai];
                              }
                           }
                        }
                        break;
                  }  /* switch(stencil_size) */
               }     /* else */
            }
         }
      }

      rb = (rb + 1) % 2;
      iter++;
   }

   /* iter counts half-sweeps; report full iterations */
   (relax_data -> num_iterations) = iter / 2;

   /*-----------------------------------------------------------------------
    * Return
    *-----------------------------------------------------------------------*/

   hypre_IncFLOPCount(relax_data -> flops);
   hypre_EndTiming(relax_data -> time_index);

   return hypre_error_flag;
}
GB_binop__pair_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this is a generated specialization of the PAIR binary operator
// for uint16: z = 1 regardless of x and y.  Both inputs are therefore
// pattern-only (their values are never read), and most variants below are
// disabled with "#if 0" because the generic/iso code paths handle them.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_uint16)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_uint16)
// C=scalar+B                       GB ((none))
// C=scalar+B'                     GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                     GB ((none))

// C type:   uint16_t
// A type:   uint16_t
// A pattern? 1
// B type:   uint16_t
// B pattern? 1

// BinaryOp: cij = 1

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty for PAIR: the value of A is never read)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// true if values of A are not used
#define GB_A_IS_PATTERN \
    1 \

// bij = Bx [pB]
// (empty for PAIR: the value of B is never read)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: PAIR always produces 1
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_UINT16 || GxB_NO_PAIR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pair_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above returns); harmless
    // generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion: unpack the user-supplied alpha/beta scalars
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // PAIR: GETB and the cast are empty statements; result is always 1
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        // PAIR: GETA and the cast are empty statements; result is always 1
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    ;  ; \
    Cx [pC] = 1 ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    ;  ; \
    Cx [pC] = 1 ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
GB_unop__bnot_int32_int32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated specialization of the BNOT (bitwise complement)
// unary operator for int32 input and int32 output: cij = ~(aij).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__bnot_int32_int32)
// op(A') function:  GB (_unop_tran__bnot_int32_int32)

// C type:   int32_t
// A type:   int32_t
// cast:     int32_t cij = aij
// unaryop:  cij = ~(aij)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ~(x) ;

// casting (identity here: input and output types match)
#define GB_CAST(z, aij) \
    int32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    int32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = aij ; \
    Cx [pC] = ~(z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BNOT || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__bnot_int32_int32)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not in the bitmap
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__bnot_int32_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template uses GB_CAST_OP defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
filtering_openmesh.h
#pragma once #include <omp.h> #include <queue> #include "../common/openmesh_report.h" #include "../common/openmesh_trimesh.h" /** *computeSigma_s() */ double computeSigma_s( const std::vector<TriMesh::VertexHandle>& vertex_neighbour, const TriMesh& mesh, const TriMesh::Point pi, const TriMesh::Normal ni) { float offset = 0; float sum = 0; float sum_sqs = 0; size_t count = vertex_neighbour.size(); for (size_t i = 0; i < count; ++i) { TriMesh::Point pj = mesh.point(vertex_neighbour[i]); float t = (pj - pi) | ni; t = sqrt(t * t); sum += t; sum_sqs += t * t; } float c = static_cast<float>(count); offset = (sum_sqs / c) - ((sum * sum) / (c * c)); float sigma_s = (sqrt(offset) < 1.0e-12) ? (sqrt(offset) + 1.0e-12) : sqrt(offset); return sigma_s; } /** * getAdaptiveVertexNeighbor() */ void getAdaptiveVertexNeighbor( TriMesh& mesh, TriMesh::VertexHandle vh, float sigma_c, std::vector<TriMesh::VertexHandle>& vertex_neighbor) { std::vector<bool> mark(mesh.n_vertices(), false); vertex_neighbor.clear(); std::queue<TriMesh::VertexHandle> queue_vertex_handle; mark[vh.idx()] = true; queue_vertex_handle.push(vh); float radius = 2.0 * sigma_c; TriMesh::Point ci = mesh.point(vh); while (!queue_vertex_handle.empty()) { TriMesh::VertexHandle vh = queue_vertex_handle.front(); vertex_neighbor.push_back(vh); queue_vertex_handle.pop(); for (TriMesh::VertexVertexIter vv_it = mesh.vv_iter(vh); vv_it.is_valid(); ++vv_it) { TriMesh::VertexHandle vh_neighbor = *vv_it; if (mark[vh_neighbor.idx()] == false) { TriMesh::Point cj = mesh.point(vh_neighbor); float length = (cj - ci).length(); if (length <= radius) queue_vertex_handle.push(vh_neighbor); mark[vh_neighbor.idx()] = true; } } } } template <typename T> void filtering_openmesh(const int num_omp_threads, TriMesh& input_mesh, std::vector<std::vector<T>>& filtered_coord, size_t& max_neighbour_size) { // Report OpenMeshReport report("Filtering_OpenMesh"); report.command_line(Arg.argc, Arg.argv); report.system(); 
report.model_data(Arg.obj_file_name, input_mesh); std::string method = "OpenMesh " + std::to_string(num_omp_threads) + " Core"; report.add_member("method", method); report.add_member("num_filter_iter", Arg.num_filter_iter); // this where each thread will store its neighbour vertices // we allocate enough space such that each thread can store as much // neighbour vertices as the number of vertices in the mesh. std::vector<std::vector<TriMesh::VertexHandle>> vertex_neighbour; for (int i = 0; i < num_omp_threads; ++i) { std::vector<TriMesh::VertexHandle> vn; vn.reserve(input_mesh.n_vertices()); vertex_neighbour.push_back(vn); } max_neighbour_size = 0; rxmesh::CPUTimer timer; timer.start(); for (uint32_t itr = 0; itr < Arg.num_filter_iter; ++itr) { input_mesh.request_face_normals(); input_mesh.request_vertex_normals(); input_mesh.update_normals(); const int num_vertrices = static_cast<int>(input_mesh.n_vertices()); #pragma omp parallel for schedule(static) num_threads(num_omp_threads) \ reduction(max \ : max_neighbour_size) for (int vert = 0; vert < num_vertrices; vert++) { TriMesh::VertexIter v_it = input_mesh.vertices_begin() + vert; int tid = omp_get_thread_num(); // calculate sigma_c TriMesh::Point pi = input_mesh.point(*v_it); TriMesh::Normal ni = input_mesh.normal(*v_it); float sigma_c = 1e10; for (TriMesh::VertexVertexIter vv_it = input_mesh.vv_iter(*v_it); vv_it.is_valid(); vv_it++) { TriMesh::Point pj = input_mesh.point(*vv_it); float length = (pi - pj).length(); if (length < sigma_c) { sigma_c = length; } } // get the neighbor vertices vertex_neighbour[tid].clear(); getAdaptiveVertexNeighbor( input_mesh, *v_it, sigma_c, vertex_neighbour[tid]); max_neighbour_size = max(max_neighbour_size, vertex_neighbour[tid].size()); // calculate sigma_s float sigma_s = computeSigma_s(vertex_neighbour[tid], input_mesh, pi, ni); float sum = 0; float normalizer = 0; // calculate new vertex position for (int iv = 0; iv < (int)vertex_neighbour[tid].size(); iv++) { TriMesh::Point 
pj = input_mesh.point(vertex_neighbour[tid][iv]); float t = (pi - pj).length(); float h = (pj - pi) | ni; float wc = std::exp(-0.5 * t * t / (sigma_c * sigma_c)); float ws = std::exp(-0.5 * h * h / (sigma_s * sigma_s)); sum += wc * ws * h; normalizer += wc * ws; } auto updated_point = pi + ni * (sum / normalizer); filtered_coord[vert][0] = updated_point[0]; filtered_coord[vert][1] = updated_point[1]; filtered_coord[vert][2] = updated_point[2]; } // update the mesh for the next iterations (needed to update the // normals correctly) #pragma omp parallel for schedule(static) num_threads(num_omp_threads) for (int vert = 0; vert < num_vertrices; vert++) { TriMesh::VertexIter v_it = input_mesh.vertices_begin() + vert; TriMesh::Point p; p[0] = filtered_coord[vert][0]; p[1] = filtered_coord[vert][1]; p[2] = filtered_coord[vert][2]; input_mesh.set_point(*v_it, p); } } timer.stop(); report.add_member("max_neighbour_size", uint32_t(max_neighbour_size)); RXMESH_TRACE("filtering_openmesh() max_neighbour_size= {}", max_neighbour_size); RXMESH_TRACE("filtering_openmesh() took {} (ms) (i.e., {} ms/iter) ", timer.elapsed_millis(), timer.elapsed_millis() / float(Arg.num_filter_iter)); // write output // std::string fn = STRINGIFY(OUTPUT_DIR) "filtering_openmesh.obj"; // if (!OpenMesh::IO::write_mesh(input_mesh, fn)) { // RXMESH_WARN("OpenMesh cannot write mesh to file {}", fn); //} // Finalize report report.add_member("total_time (ms)", timer.elapsed_millis()); rxmesh::TestData td; td.test_name = "MCF"; td.num_threads = num_omp_threads; td.time_ms.push_back(timer.elapsed_millis()); td.passed.push_back(true); report.add_test(td); report.write( Arg.output_folder + "/openmesh", "MCF_OpenMesh_" + rxmesh::extract_file_name(Arg.obj_file_name)); }
uccsd_t.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include "config.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" typedef struct { void *cache[6]; short a; short b; short c; short _padding; } CacheJob; double _ccsd_t_get_energy(double *w, double *v, double *mo_energy, int nocc, int a, int b, int c, double fac); size_t _ccsd_t_gen_jobs(CacheJob *jobs, int nocc, int nvir, int a0, int a1, int b0, int b1, void *cache_row_a, void *cache_col_a, void *cache_row_b, void *cache_col_b, size_t stride); void _make_permute_indices(int *idx, int n); double _ccsd_t_zget_energy(double complex *w, double complex *v, double *mo_energy, int nocc, int a, int b, int c, double fac); /* * w + w.transpose(1,2,0) + w.transpose(2,0,1) * - w.transpose(2,1,0) - w.transpose(0,2,1) - w.transpose(1,0,2) */ static void add_and_permute(double *out, double *w, double *v, int n) { int nn = n * n; int nnn = nn * n; int i, j, k; for (i = 0; i < nnn; i++) { v[i] += w[i]; } for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { for (k = 0; k < n; k++) { out[i*nn+j*n+k] = v[i*nn+j*n+k] + v[j*nn+k*n+i] + v[k*nn+i*n+j] - v[k*nn+j*n+i] - v[i*nn+k*n+j] - v[j*nn+i*n+k]; } } } } /* * t2T = t2.transpose(2,3,0,1) * ov = vv_op[:,nocc:] * oo = vv_op[:,:nocc] * w = numpy.einsum('if,fjk->ijk', -ov, t2T[c]) * w-= numpy.einsum('ijm,mk->ijk', vooo[a], t2T[b,c]) * v = numpy.einsum('ij,k->ijk', oo, t1T[c]) * v+= w */ 
/* Accumulate the W and V intermediates for one permutation of (a,b,c)
 * into w[] and v[], scattering through the permutation index table idx
 * (built by _make_permute_indices).  Implements the numpy reference in
 * the comment above.  cache is nocc^3 scratch for the dgemm results. */
static void get_wv(double *w, double *v, double *cache, double *fvohalf,
                   double *vooo, double *vv_op, double *t1T, double *t2T,
                   int nocc, int nvir, int a, int b, int c, int *idx)
{
        const double D0 = 0;
        const double D1 = 1;
        const double DN1 =-1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const int nmo = nocc + nvir;
        const int noo = nocc * nocc;
        /* size_t so the t2T pointer offsets below cannot overflow int */
        const size_t nooo = nocc * noo;
        const size_t nvoo = nvir * noo;
        int i, j, k, n;
        double *pt2T;

        /* cache <- -ov . t2T[c]   ('if,fjk->ijk') */
        dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
               &DN1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
               &D0, cache, &noo);
        /* cache -= vooo[a] . t2T[b,c]   ('ijm,mk->ijk') */
        dgemm_(&TRANS_N, &TRANS_T, &nocc, &noo, &nocc,
               &DN1, t2T+b*nvoo+c*noo, &nocc, vooo+a*nooo, &noo,
               &D1, cache, &nocc);

        /* scatter into w; v gets the oo-block/t1 term plus the Fock term */
        pt2T = t2T + a * nvoo + b * noo;
        for (n = 0, i = 0; i < nocc; i++) {
        for (j = 0; j < nocc; j++) {
        for (k = 0; k < nocc; k++, n++) {
                w[idx[n]] += cache[n];
                v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k]
                           + pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
        } } }
}

/* Symmetry-adapted variant of get_wv for nirrep > 1: orbitals are grouped
 * by irreducible representation and only symmetry-allowed blocks (irrep
 * XOR products matching) are contracted.  o_ir_loc / v_ir_loc give the
 * occupied / virtual orbital offsets per irrep, oo_ir_loc the packed
 * occupied-pair offsets. */
static void sym_wv(double *w, double *v, double *cache, double *fvohalf,
                   double *vooo, double *vv_op, double *t1T, double *t2T,
                   int nocc, int nvir, int a, int b, int c, int nirrep,
                   int *o_ir_loc, int *v_ir_loc, int *oo_ir_loc, int *orbsym,
                   int *idx)
{
        const double D0 = 0;
        const double D1 = 1;
        const char TRANS_N = 'N';
        const int nmo = nocc + nvir;
        const int noo = nocc * nocc;
        const int nooo = nocc * noo;
        const int nvoo = nvir * noo;
        int a_irrep = orbsym[nocc+a];
        int b_irrep = orbsym[nocc+b];
        int c_irrep = orbsym[nocc+c];
        int ab_irrep = a_irrep ^ b_irrep;
        int bc_irrep = c_irrep ^ b_irrep;
        int i, j, k, n;
        int fr, f0, f1, df, mr, m0, m1, dm, mk0;
        int ir, i0, i1, di, kr, k0, k1, dk, jr;
        int ijr, ij0, ij1, dij, jkr, jk0, jk1, djk;
        double *pt2T;

/* symmetry adapted
 * w = numpy.einsum('if,fjk->ijk', ov, t2T[c])
 * (accumulated with a minus sign below, matching get_wv's -ov) */
        pt2T = t2T + c * nvoo;
        for (ir = 0; ir < nirrep; ir++) {
                i0 = o_ir_loc[ir];
                i1 = o_ir_loc[ir+1];
                di = i1 - i0;
                if (di > 0) {
                        /* virtual index f must carry irrep ir ^ ab_irrep */
                        fr = ir ^ ab_irrep;
                        f0 = v_ir_loc[fr];
                        f1 = v_ir_loc[fr+1];
                        df = f1 - f0;
                        if (df > 0) {
                                /* pair (j,k) must carry irrep fr ^ c_irrep */
                                jkr = fr ^ c_irrep;
                                jk0 = oo_ir_loc[jkr];
                                jk1 = oo_ir_loc[jkr+1];
                                djk = jk1 - jk0;
                                if (djk > 0) {
        dgemm_(&TRANS_N, &TRANS_N, &djk, &di, &df,
               &D1, pt2T+f0*noo+jk0, &noo, vv_op+i0*nmo+nocc+f0, &nmo,
               &D0, cache, &djk);
        /* scatter the symmetry-packed gemm result back to full (i,j,k) */
        for (n = 0, i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
        for (jr = 0; jr < nirrep; jr++) {
                kr = jkr ^ jr;
                for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
                for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
                        w[idx[i*noo+j*nocc+k]] -= cache[n];
                } }
        } }
                                }
                        }
                }
        }

/* symmetry adapted
 * w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a], t2T[c,b]) */
        pt2T = t2T + c * nvoo + b * noo;
        vooo += a * nooo;
        /* mk0 walks the packed (m,k) blocks of t2T[c,b] for irrep bc_irrep */
        mk0 = oo_ir_loc[bc_irrep];
        for (mr = 0; mr < nirrep; mr++) {
                m0 = o_ir_loc[mr];
                m1 = o_ir_loc[mr+1];
                dm = m1 - m0;
                if (dm > 0) {
                        kr = mr ^ bc_irrep;
                        k0 = o_ir_loc[kr];
                        k1 = o_ir_loc[kr+1];
                        dk = k1 - k0;
                        if (dk > 0) {
                                ijr = mr ^ a_irrep;
                                ij0 = oo_ir_loc[ijr];
                                ij1 = oo_ir_loc[ijr+1];
                                dij = ij1 - ij0;
                                if (dij > 0) {
        dgemm_(&TRANS_N, &TRANS_N, &dk, &dij, &dm,
               &D1, pt2T+mk0, &dk, vooo+ij0*nocc+m0, &nocc,
               &D0, cache, &dk);
        for (n = 0, ir = 0; ir < nirrep; ir++) {
                jr = ijr ^ ir;
                for (i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
                for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
                for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
                        w[idx[i*noo+j*nocc+k]] -= cache[n];
                } } }
        }
                                }
                                /* advance even when dij == 0: the packed
                                 * block still occupies dm*dk entries */
                                mk0 += dm * dk;
                        }
                }
        }

        /* V term: identical to the tail of get_wv (no symmetry packing) */
        pt2T = t2T + a * nvoo + b * noo;
        for (n = 0, i = 0; i < nocc; i++) {
        for (j = 0; j < nocc; j++) {
        for (k = 0; k < nocc; k++, n++) {
                v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k]
                           + pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
        } } }
}

/* Energy contribution of one triple (a,b,c) for the alpha-alpha-alpha
 * (or beta-beta-beta) spin case.  Accumulates the six permutations of
 * (a,b,c) into w0/v0, permutes, and weights by the degeneracy factor:
 * 1/6 when a == c (all three equal -- presumably jobs satisfy a>=b>=c;
 * verify against _ccsd_t_gen_jobs), 1/2 when exactly two are equal,
 * 1 otherwise.  cache1 must hold at least 3*nocc^3 doubles. */
static double contract6_aaa(int nocc, int nvir, int a, int b, int c,
                            double *mo_energy, double *t1T, double *t2T,
                            int nirrep, int *o_ir_loc, int *v_ir_loc,
                            int *oo_ir_loc, int *orbsym, double *fvo,
                            double *vooo, double *cache1, void **cache,
                            int *permute_idx)
{
        int nooo = nocc * nocc * nocc;
        int *idx0 = permute_idx;
        int *idx1 = idx0 + nooo;
        int *idx2 = idx1 + nooo;
        int *idx3 = idx2 + nooo;
        int *idx4 = idx3 + nooo;
        int *idx5 = idx4 + nooo;
        double *v0 = cache1;
        double *w0 = v0 + nooo;
        double *z0 = w0 + nooo;
        /* z0 doubles as gemm scratch: it is only written as output after
         * all get_wv/sym_wv calls have consumed it as scratch */
        double *wtmp = z0;
        int i;

        for (i = 0; i < nooo; i++) {
                w0[i] = 0;
                v0[i] = 0;
        }

        if (nirrep == 1) {
                get_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx0);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx1);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx2);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx3);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx4);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx5);
        } else {
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx0);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx1);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx2);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx3);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx4);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx5);
        }
        add_and_permute(z0, w0, v0, nocc);

        double et;
        if (a == c) {
                et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
        } else if (a == b || b == c) {
                et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
        } else {
                et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
        }
        return et;
}

/* Driver: alpha-alpha-alpha (T) correction over the (a,b) panel
 * [a0,a1) x [b0,b1).  Accumulates the energy into *e_tot. */
void CCuccsd_t_aaa(double complex *e_tot,
                   double *mo_energy, double *t1T, double *t2T,
                   double *vooo, double *fvo,
                   int nocc, int nvir, int a0, int a1, int b0, int b1,
                   int nirrep, int *o_ir_loc, int *v_ir_loc,
                   int *oo_ir_loc, int *orbsym,
                   double *cache_row_a, double *cache_col_a,
                   double *cache_row_b, double *cache_col_b)
{
        int da = a1 - a0;
int db = b1 - b0; CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1); size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1, cache_row_a, cache_col_a, cache_row_b, cache_col_b, sizeof(double)); double *fvohalf = malloc(sizeof(double) * nvir*nocc); int i; for (i = 0; i < nvir*nocc; i++) { fvohalf[i] = fvo[i] * .5; } int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6); _make_permute_indices(permute_idx, nocc); #pragma omp parallel default(none) \ shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \ v_ir_loc, oo_ir_loc, orbsym, vooo, fvohalf, jobs, e_tot, \ permute_idx) { int a, b, c; size_t k; double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2)); double e = 0; #pragma omp for schedule (dynamic, 4) for (k = 0; k < njobs; k++) { a = jobs[k].a; b = jobs[k].b; c = jobs[k].c; e += contract6_aaa(nocc, nvir, a, b, c, mo_energy, t1T, t2T, nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, fvohalf, vooo, cache1, jobs[k].cache, permute_idx); } free(cache1); #pragma omp critical *e_tot += e; } free(permute_idx); free(fvohalf); } /************************************************* * * UCCSD(T) beta-alpha-alpha * *************************************************/ static void get_wv_baa(double *w, double *v, double **vs_ts, double **cache, int nocca, int noccb, int nvira, int nvirb, int a, int b, int c, double *cache1) { double *fvo = vs_ts[2]; double *fVO = vs_ts[3]; double *vooo = vs_ts[4]; double *vOoO = vs_ts[5]; double *VoOo = vs_ts[6]; double *t1aT = vs_ts[7]; double *t1bT = vs_ts[8]; double *t2aaT = vs_ts[9]; double *t2abT = vs_ts[10]; double *vvop = cache[0]; double *vVoP = cache[1]; double *VvOp = cache[2]; const double D0 = 0; const double D1 = 1; const double D2 = 2; const char TRANS_T = 'T'; const char TRANS_N = 'N'; const int nmoa = nocca + nvira; const int nmob = noccb + nvirb; const int noo = nocca * nocca; const int nOo = noccb * nocca; const size_t nooo = nocca * noo; const size_t noOo = nocca * nOo; const size_t nOoO = 
noccb * nOo; const size_t nvoo = nvira * noo; const int nVoO = nvirb * nOo; int i, j, k, n; /* * t2aaT = t2aa.transpose(2,3,0,1) * w = numpy.einsum('ejI,ke->Ijk', t2abT[:,a], vvov) * 2 * w += numpy.einsum('EjI,kE->Ijk', t2abT[b,:], vVoV) * 2 * w += numpy.einsum('mj,mIk->Ijk', t2aaT[b,c], VoOo[a,:]) * w += numpy.einsum('kM,MjI->Ijk', t2abT[b,a], vOoO[c,:]) * 2 * w += numpy.einsum('ejk,Ie->Ijk', t2aaT[b,:], VvOv) * w += numpy.einsum('mI,mjk->Ijk', t2abT[b,a], vooo[c,:]) * 2 * v = numpy.einsum('kj,I->Ijk', vvoo, t1bT[a]) * v += numpy.einsum('Ik,j->Ijk', VvOo, t1aT[b]) * 2 * v += numpy.einsum('jk,I->Ijk', t2aaT[b,c], fVO[a]) * .5 * v += numpy.einsum('kI,j->Ijk', t2abT[c,a], fvo[b]) * 2 * v += w */ dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvira, &D2, vvop+nocca, &nmoa, t2abT+a*nOo, &nVoO, &D0, v, &nocca); dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvirb, &D2, vVoP+noccb, &nmob, t2abT+b*(size_t)nVoO, &nOo, &D1, v, &nocca); dgemm_(&TRANS_N, &TRANS_T, &nOo, &nocca, &nocca, &D1, VoOo+a*noOo, &nOo, t2aaT+b*nvoo+c*noo, &nocca, &D1, v, &nOo); dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &noccb, &D2, t2abT+b*(size_t)nVoO+a*nOo, &noccb, vOoO+c*nOoO, &nOo, &D1, v, &nocca); for (n = 0, i = 0; i < noccb; i++) { for (j = 0; j < nocca; j++) { for (k = 0; k < nocca; k++, n++) { w[n] = v[j*nOo+i*nocca+k]; } } } dgemm_(&TRANS_N, &TRANS_N, &noo, &noccb, &nvira, &D1, t2aaT+b*nvoo, &noo, VvOp+nocca, &nmoa, &D1, w, &noo); dgemm_(&TRANS_N, &TRANS_T, &noo, &noccb, &nocca, &D2, vooo+c*nooo, &noo, t2abT+b*(size_t)nVoO+a*nOo, &noccb, &D1, w, &noo); double *t1aT2 = cache1; double *fvo2 = t1aT2 + nocca; double *fVOhalf = fvo2 + nocca; for (i = 0; i < nocca; i++) { t1aT2[i] = t1aT[b*nocca+i] * 2; fvo2[i] = fvo[b*nocca+i] * 2; } for (i = 0; i < noccb; i++) { fVOhalf[i] = fVO[a*noccb+i] * .5; } double *pt2aaT = t2aaT + b * nvoo + c * noo; double *pt2abT = t2abT + (c*nvirb+a) * nOo; for (n = 0, i = 0; i < noccb; i++) { for (j = 0; j < nocca; j++) { for (k = 0; k < nocca; k++, n++) { v[n] = (w[n] + 
vvop[k*nmoa+j] * t1bT[a*noccb+i] + VvOp[i*nmoa+k] * t1aT2[j] + pt2aaT[j*nocca+k] * fVOhalf[i] + pt2abT[k*noccb+i] * fvo2[j]); } } } } /* * w - w.transpose(0,2,1) */ static void permute_baa(double *out, double *w, int nocca, int noccb) { int noo = nocca * nocca; int n; int i, j, k; for (n = 0, i = 0; i < noccb; i++) { for (j = 0; j < nocca; j++) { for (k = 0; k < nocca; k++, n++) { out[n] = w[i*noo+j*nocca+k] - w[i*noo+k*nocca+j]; } } } } static double _get_energy_baa(double *z0, double *z1, double *w0, double *w1, double *mo_ea, double *mo_eb, int nocca, int noccb, int a, int b, int c, double fac) { int noo = nocca * nocca; int i, j, k; double abc = mo_eb[noccb+a] + mo_ea[nocca+b] + mo_ea[nocca+c]; double et = 0; for (i = 0; i < noccb; i++) { for (j = 0; j < nocca; j++) { for (k = 0; k < nocca; k++) { et += (z0[i*noo+j*nocca+k] + z1[i*noo+k*nocca+j]) * (w0[i*noo+j*nocca+k] + w1[i*noo+k*nocca+j]) * fac / (mo_eb[i] + mo_ea[j] + mo_ea[k] - abc); } } } return et; } static double contract6_baa(int nocca, int noccb, int nvira, int nvirb, int a, int b, int c, double **vs_ts, void **cache, double *cache1) { int nOoo = noccb * nocca * nocca; double *v0 = cache1; double *v1 = v0 + nOoo; double *w0 = v1 + nOoo; double *w1 = w0 + nOoo; double *z0 = w1 + nOoo; double *z1 = v0; cache1 += nOoo * 5; get_wv_baa(w0, v0, vs_ts, ((double **)cache) , nocca, noccb, nvira, nvirb, a, b, c, cache1); get_wv_baa(w1, v1, vs_ts, ((double **)cache)+3, nocca, noccb, nvira, nvirb, a, c, b, cache1); permute_baa(z0, v0, nocca, noccb); permute_baa(z1, v1, nocca, noccb); double *mo_ea = vs_ts[0]; double *mo_eb = vs_ts[1]; double et; if (b == c) { et = _get_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, .5); } else { et = _get_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, 1.); } return et; } static size_t gen_baa_jobs(CacheJob *jobs, int nocca, int noccb, int nvira, int nvirb, int a0, int a1, int b0, int b1, void *cache_row_a, void *cache_col_a, void *cache_row_b, 
void *cache_col_b, size_t stride) { size_t nov = nocca * (nocca+nvira) * stride; size_t noV = nocca * (noccb+nvirb) * stride; size_t nOv = noccb * (nocca+nvira) * stride; int da = a1 - a0; int db = b1 - b0; int a, b, c; size_t m = 0; for (a = a0; a < a1; a++) { for (b = b0; b < b1; b++) { for (c = 0; c <= b; c++, m++) { jobs[m].a = a; jobs[m].b = b; jobs[m].c = c; if (c < b0) { jobs[m].cache[0] = cache_col_b + nov*(db*(c )+b-b0); } else { jobs[m].cache[0] = cache_row_b + nov*(b1*(c-b0)+b ); } jobs[m].cache[1] = cache_col_a + noV*(da *(c )+a-a0); jobs[m].cache[2] = cache_row_a + nOv*(nvira*(a-a0)+c ); jobs[m].cache[3] = cache_row_b + nov*(b1 *(b-b0)+c ); jobs[m].cache[4] = cache_col_a + noV*(da *(b )+a-a0); jobs[m].cache[5] = cache_row_a + nOv*(nvira*(a-a0)+b ); } } } return m; } void CCuccsd_t_baa(double complex *e_tot, double *mo_ea, double *mo_eb, double *t1aT, double *t1bT, double *t2aaT, double *t2abT, double *vooo, double *vOoO, double *VoOo, double *fvo, double *fVO, int nocca, int noccb, int nvira, int nvirb, int a0, int a1, int b0, int b1, void *cache_row_a, void *cache_col_a, void *cache_row_b, void *cache_col_b) { int da = a1 - a0; int db = b1 - b0; CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1); size_t njobs = gen_baa_jobs(jobs, nocca, noccb, nvira, nvirb, a0, a1, b0, b1, cache_row_a, cache_col_a, cache_row_b, cache_col_b, sizeof(double)); double *vs_ts[] = {mo_ea, mo_eb, fvo, fVO, vooo, vOoO, VoOo, t1aT, t1bT, t2aaT, t2abT}; #pragma omp parallel default(none) \ shared(njobs, nocca, noccb, nvira, nvirb, vs_ts, jobs, e_tot) { int a, b, c; size_t k; double *cache1 = malloc(sizeof(double) * (noccb*nocca*nocca*5+1 + nocca*2+noccb*2)); double e = 0; #pragma omp for schedule (dynamic, 4) for (k = 0; k < njobs; k++) { a = jobs[k].a; b = jobs[k].b; c = jobs[k].c; e += contract6_baa(nocca, noccb, nvira, nvirb, a, b, c, vs_ts, jobs[k].cache, cache1); } free(cache1); #pragma omp critical *e_tot += e; } } /* * Complex version of all functions */ static void 
zadd_and_permute(double complex *out, double complex *w, double complex *v, int n) { int nn = n * n; int nnn = nn * n; int i, j, k; for (i = 0; i < nnn; i++) { v[i] += w[i]; } for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { for (k = 0; k < n; k++) { out[i*nn+j*n+k] = v[i*nn+j*n+k] + v[j*nn+k*n+i] + v[k*nn+i*n+j] - v[k*nn+j*n+i] - v[i*nn+k*n+j] - v[j*nn+i*n+k]; } } } } static void zget_wv(double complex *w, double complex *v, double complex *cache, double complex *fvohalf, double complex *vooo, double complex *vv_op, double complex *t1T, double complex *t2T, int nocc, int nvir, int a, int b, int c, int *idx) { const double complex D0 = 0; const double complex D1 = 1; const double complex DN1 =-1; const char TRANS_N = 'N'; const char TRANS_T = 'T'; const int nmo = nocc + nvir; const int noo = nocc * nocc; const size_t nooo = nocc * noo; const size_t nvoo = nvir * noo; int i, j, k, n; double complex *pt2T; zgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir, &DN1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo, &D0, cache, &noo); zgemm_(&TRANS_N, &TRANS_T, &nocc, &noo, &nocc, &DN1, t2T+b*nvoo+c*noo, &nocc, vooo+a*nooo, &noo, &D1, cache, &nocc); pt2T = t2T + a * nvoo + b * noo; for (n = 0, i = 0; i < nocc; i++) { for (j = 0; j < nocc; j++) { for (k = 0; k < nocc; k++, n++) { w[idx[n]] += cache[n]; v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k] + pt2T[i*nocc+j] * fvohalf[c*nocc+k]); } } } } static double complex zcontract6_aaa(int nocc, int nvir, int a, int b, int c, double *mo_energy, double complex *t1T, double complex *t2T, int nirrep, int *o_ir_loc, int *v_ir_loc, int *oo_ir_loc, int *orbsym, double complex *fvo, double complex *vooo, double complex *cache1, void **cache, int *permute_idx) { int nooo = nocc * nocc * nocc; int *idx0 = permute_idx; int *idx1 = idx0 + nooo; int *idx2 = idx1 + nooo; int *idx3 = idx2 + nooo; int *idx4 = idx3 + nooo; int *idx5 = idx4 + nooo; double complex *v0 = cache1; double complex *w0 = v0 + nooo; double complex *z0 = w0 + nooo; double complex *wtmp = 
z0; int i; for (i = 0; i < nooo; i++) { w0[i] = 0; v0[i] = 0; } zget_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx0); zget_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx1); zget_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx2); zget_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx3); zget_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx4); zget_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx5); zadd_and_permute(z0, w0, v0, nocc); double complex et; if (a == c) { et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6); } else if (a == b || b == c) { et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, .5); } else { et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, 1.); } return et; } void CCuccsd_t_zaaa(double complex *e_tot, double *mo_energy, double complex *t1T, double complex *t2T, double complex *vooo, double complex *fvo, int nocc, int nvir, int a0, int a1, int b0, int b1, int nirrep, int *o_ir_loc, int *v_ir_loc, int *oo_ir_loc, int *orbsym, void *cache_row_a, void *cache_col_a, void *cache_row_b, void *cache_col_b) { int da = a1 - a0; int db = b1 - b0; CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1); size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1, cache_row_a, cache_col_a, cache_row_b, cache_col_b, sizeof(double complex)); double complex *fvohalf = malloc(sizeof(double complex) * nvir*nocc); int i; for (i = 0; i < nvir*nocc; i++) { fvohalf[i] = fvo[i] * .5; } int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6); _make_permute_indices(permute_idx, nocc); #pragma omp parallel default(none) \ shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \ v_ir_loc, oo_ir_loc, orbsym, vooo, fvohalf, jobs, e_tot, \ permute_idx) { int a, b, c; size_t k; double complex *cache1 = malloc(sizeof(double complex) * (nocc*nocc*nocc*3+2)); double complex e = 0; 
#pragma omp for schedule (dynamic, 4) for (k = 0; k < njobs; k++) { a = jobs[k].a; b = jobs[k].b; c = jobs[k].c; e += zcontract6_aaa(nocc, nvir, a, b, c, mo_energy, t1T, t2T, nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, fvohalf, vooo, cache1, jobs[k].cache, permute_idx); } free(cache1); #pragma omp critical *e_tot += e; } free(permute_idx); free(fvohalf); } /************************************************* * * UCCSD(T) beta-alpha-alpha * *************************************************/ static void zget_wv_baa(double complex *w, double complex *v, double complex **vs_ts, double complex **cache, int nocca, int noccb, int nvira, int nvirb, int a, int b, int c, double complex *cache1) { double complex *fvo = vs_ts[2]; double complex *fVO = vs_ts[3]; double complex *vooo = vs_ts[4]; double complex *vOoO = vs_ts[5]; double complex *VoOo = vs_ts[6]; double complex *t1aT = vs_ts[7]; double complex *t1bT = vs_ts[8]; double complex *t2aaT = vs_ts[9]; double complex *t2abT = vs_ts[10]; double complex *vvop = cache[0]; double complex *vVoP = cache[1]; double complex *VvOp = cache[2]; const double complex D0 = 0; const double complex D1 = 1; const double complex D2 = 2; const char TRANS_T = 'T'; const char TRANS_N = 'N'; const int nmoa = nocca + nvira; const int nmob = noccb + nvirb; const int noo = nocca * nocca; const int nOo = noccb * nocca; const size_t nooo = nocca * noo; const size_t noOo = nocca * nOo; const size_t nOoO = noccb * nOo; const size_t nvoo = nvira * noo; const int nVoO = nvirb * nOo; int i, j, k, n; zgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvira, &D2, vvop+nocca, &nmoa, t2abT+a*nOo, &nVoO, &D0, v, &nocca); zgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvirb, &D2, vVoP+noccb, &nmob, t2abT+b*(size_t)nVoO, &nOo, &D1, v, &nocca); zgemm_(&TRANS_N, &TRANS_T, &nOo, &nocca, &nocca, &D1, VoOo+a*noOo, &nOo, t2aaT+b*nvoo+c*noo, &nocca, &D1, v, &nOo); zgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &noccb, &D2, t2abT+b*(size_t)nVoO+a*nOo, &noccb, vOoO+c*nOoO, &nOo, &D1, v, 
&nocca); for (n = 0, i = 0; i < noccb; i++) { for (j = 0; j < nocca; j++) { for (k = 0; k < nocca; k++, n++) { w[n] = v[j*nOo+i*nocca+k]; } } } zgemm_(&TRANS_N, &TRANS_N, &noo, &noccb, &nvira, &D1, t2aaT+b*nvoo, &noo, VvOp+nocca, &nmoa, &D1, w, &noo); zgemm_(&TRANS_N, &TRANS_T, &noo, &noccb, &nocca, &D2, vooo+c*nooo, &noo, t2abT+b*(size_t)nVoO+a*nOo, &noccb, &D1, w, &noo); double complex *t1aT2 = cache1; double complex *fvo2 = t1aT2 + nocca; double complex *fVOhalf = fvo2 + nocca; for (i = 0; i < nocca; i++) { t1aT2[i] = t1aT[b*nocca+i] * 2; fvo2[i] = fvo[b*nocca+i] * 2; } for (i = 0; i < noccb; i++) { fVOhalf[i] = fVO[a*noccb+i] * .5; } double complex *pt2aaT = t2aaT + b * nvoo + c * noo; double complex *pt2abT = t2abT + (c*nvirb+a) * nOo; for (n = 0, i = 0; i < noccb; i++) { for (j = 0; j < nocca; j++) { for (k = 0; k < nocca; k++, n++) { v[n] = (w[n] + vvop[k*nmoa+j] * t1bT[a*noccb+i] + VvOp[i*nmoa+k] * t1aT2[j] + pt2aaT[j*nocca+k] * fVOhalf[i] + pt2abT[k*noccb+i] * fvo2[j]); } } } } /* * w - w.transpose(0,2,1) */ static void zpermute_baa(double complex *out, double complex *w, int nocca, int noccb) { int noo = nocca * nocca; int n; int i, j, k; for (n = 0, i = 0; i < noccb; i++) { for (j = 0; j < nocca; j++) { for (k = 0; k < nocca; k++, n++) { out[n] = w[i*noo+j*nocca+k] - w[i*noo+k*nocca+j]; } } } } static double complex _zget_energy_baa(double complex *z0, double complex *z1, double complex *w0, double complex *w1, double *mo_ea, double *mo_eb, int nocca, int noccb, int a, int b, int c, double fac) { int noo = nocca * nocca; int i, j, k; double abc = mo_eb[noccb+a] + mo_ea[nocca+b] + mo_ea[nocca+c]; double complex et = 0; for (i = 0; i < noccb; i++) { for (j = 0; j < nocca; j++) { for (k = 0; k < nocca; k++) { et += conj(z0[i*noo+j*nocca+k] + z1[i*noo+k*nocca+j]) * (w0[i*noo+j*nocca+k] + w1[i*noo+k*nocca+j]) * (fac / (mo_eb[i] + mo_ea[j] + mo_ea[k] - abc)); } } } return et; } static double complex zcontract6_baa(int nocca, int noccb, int nvira, int nvirb, 
int a, int b, int c, double complex **vs_ts, void **cache, double complex *cache1) { int nOoo = noccb * nocca * nocca; double complex *v0 = cache1; double complex *v1 = v0 + nOoo; double complex *w0 = v1 + nOoo; double complex *w1 = w0 + nOoo; double complex *z0 = w1 + nOoo; double complex *z1 = v0; cache1 += nOoo * 5; zget_wv_baa(w0, v0, vs_ts, ((double complex **)cache) , nocca, noccb, nvira, nvirb, a, b, c, cache1); zget_wv_baa(w1, v1, vs_ts, ((double complex **)cache)+3, nocca, noccb, nvira, nvirb, a, c, b, cache1); zpermute_baa(z0, v0, nocca, noccb); zpermute_baa(z1, v1, nocca, noccb); double *mo_ea = (double *)vs_ts[0]; double *mo_eb = (double *)vs_ts[1]; double complex et; if (b == c) { et = _zget_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, .5); } else { et = _zget_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, 1.); } return et; } void CCuccsd_t_zbaa(double complex *e_tot, double *mo_ea, double *mo_eb, double complex *t1aT, double complex *t1bT, double complex *t2aaT, double complex *t2abT, double complex *vooo, double complex *vOoO, double complex *VoOo, double complex *fvo, double complex *fVO, int nocca, int noccb, int nvira, int nvirb, int a0, int a1, int b0, int b1, void *cache_row_a, void *cache_col_a, void *cache_row_b, void *cache_col_b) { int da = a1 - a0; int db = b1 - b0; CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1); size_t njobs = gen_baa_jobs(jobs, nocca, noccb, nvira, nvirb, a0, a1, b0, b1, cache_row_a, cache_col_a, cache_row_b, cache_col_b, sizeof(double complex)); double complex *vs_ts[] = {(double complex *)mo_ea, (double complex *)mo_eb, fvo, fVO, vooo, vOoO, VoOo, t1aT, t1bT, t2aaT, t2abT}; #pragma omp parallel default(none) \ shared(njobs, nocca, noccb, nvira, nvirb, vs_ts, jobs, e_tot) { int a, b, c; size_t k; double complex *cache1 = malloc(sizeof(double complex) * (noccb*nocca*nocca*5+1 + nocca*2+noccb*2)); double complex e = 0; #pragma omp for schedule (dynamic, 4) for (k = 0; k < njobs; 
k++) { a = jobs[k].a; b = jobs[k].b; c = jobs[k].c; e += zcontract6_baa(nocca, noccb, nvira, nvirb, a, b, c, vs_ts, jobs[k].cache, cache1); } free(cache1); #pragma omp critical *e_tot += e; } }
array_args.h
#ifndef LIGHTGBM_UTILS_ARRAY_AGRS_H_ #define LIGHTGBM_UTILS_ARRAY_AGRS_H_ #include <vector> #include <algorithm> #include <LightGBM/utils/openmp_wrapper.h> namespace LightGBM { /*! * \brief Contains some operation for a array, e.g. ArgMax, TopK. */ template<typename VAL_T> class ArrayArgs { public: inline static size_t ArgMaxMT(const std::vector<VAL_T>& array) { int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } int step = std::max(1, (static_cast<int>(array.size()) + num_threads - 1) / num_threads); std::vector<size_t> arg_maxs(num_threads, 0); #pragma omp parallel for schedule(static,1) for (int i = 0; i < num_threads; ++i) { size_t start = step * i; if (start >= array.size()) { continue; } size_t end = std::min(array.size(), start + step); size_t arg_max = start; for (size_t j = start + 1; j < end; ++j) { if (array[j] > array[arg_max]) { arg_max = j; } } arg_maxs[i] = arg_max; } size_t ret = arg_maxs[0]; for (int i = 1; i < num_threads; ++i) { if (array[arg_maxs[i]] > array[ret]) { ret = arg_maxs[i]; } } return ret; } inline static size_t ArgMax(const std::vector<VAL_T>& array) { if (array.empty()) { return 0; } if (array.size() > 1024) { return ArgMaxMT(array); } else { size_t arg_max = 0; for (size_t i = 1; i < array.size(); ++i) { if (array[i] > array[arg_max]) { arg_max = i; } } return arg_max; } } inline static size_t ArgMin(const std::vector<VAL_T>& array) { if (array.empty()) { return 0; } size_t arg_min = 0; for (size_t i = 1; i < array.size(); ++i) { if (array[i] < array[arg_min]) { arg_min = i; } } return arg_min; } inline static size_t ArgMax(const VAL_T* array, size_t n) { if (n <= 0) { return 0; } size_t arg_max = 0; for (size_t i = 1; i < n; ++i) { if (array[i] > array[arg_max]) { arg_max = i; } } return arg_max; } inline static size_t ArgMin(const VAL_T* array, size_t n) { if (n <= 0) { return 0; } size_t arg_min = 0; for (size_t i = 1; i < n; ++i) { if (array[i] < array[arg_min]) { arg_min = i; } 
} return arg_min; } inline static void Partition(std::vector<VAL_T>* arr, int start, int end, int* l, int* r) { int i = start - 1; int j = end - 1; int p = i; int q = j; if (start >= end) { return; } std::vector<VAL_T>& ref = *arr; VAL_T v = ref[end - 1]; for (;;) { while (ref[++i] > v); while (v > ref[--j]) { if (j == start) { break; } } if (i >= j) { break; } std::swap(ref[i], ref[j]); if (ref[i] == v) { p++; std::swap(ref[p], ref[i]); } if (v == ref[j]) { q--; std::swap(ref[j], ref[q]); } } std::swap(ref[i], ref[end - 1]); j = i - 1; i = i + 1; for (int k = start; k <= p; k++, j--) { std::swap(ref[k], ref[j]); } for (int k = end - 2; k >= q; k--, i++) { std::swap(ref[i], ref[k]); } *l = j; *r = i; }; // Note: k refer to index here. e.g. k=0 means get the max number. inline static int ArgMaxAtK(std::vector<VAL_T>* arr, int start, int end, int k) { if (start >= end - 1) { return start; } int l = start; int r = end - 1; Partition(arr, start, end, &l, &r); // if find or all elements are the same. if ((k > l && k < r) || (l == start - 1 && r == end - 1)) { return k; } else if (k <= l) { return ArgMaxAtK(arr, start, l + 1, k); } else { return ArgMaxAtK(arr, r, end, k); } } // Note: k is 1-based here. e.g. k=3 means get the top-3 numbers. inline static void MaxK(const std::vector<VAL_T>& array, int k, std::vector<VAL_T>* out) { out->clear(); if (k <= 0) { return; } for (auto val : array) { out->push_back(val); } if (static_cast<size_t>(k) >= array.size()) { return; } ArgMaxAtK(out, 0, static_cast<int>(out->size()), k - 1); out->erase(out->begin() + k, out->end()); } }; } // namespace LightGBM #endif // LightGBM_UTILS_ARRAY_AGRS_H_
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Exact integer ceiling/floor division, used by the tiled loop bounds below. */
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified in place to perform the carry (classic glibc idiom).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative.
   */
  return x->tv_sec < y->tv_sec;
}

/*
 * Driver: allocates a double-buffered (Nz x Ny x Nx) grid, runs the
 * PLUTO-tiled 7-point Jacobi stencil Nt-1 time steps, TESTS times, and
 * reports per-run and minimum wall-clock time via PRINT_RESULTS.
 *
 * Usage: a.out Nx Ny Nz [Nt]  (interior sizes; +2 is added for halo layers).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz are read uninitialized when argc <= 3, and Nt is
   * read uninitialized when argc <= 4 — there is no default and no usage
   * message. Confirm intended invocation always supplies all 4 arguments. */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time buffers (A[0], A[1]) of Nz planes, each Ny rows of Nx doubles,
   * allocated row-by-row (not contiguous). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;    /* time-tile height   (t5 blocks of 8 below) */
  tile_size[1] = 8;    /* z-tile size        (t6 blocks of 8)       */
  tile_size[2] = 4;    /* y-tile size        (t7 blocks of 4)       */
  tile_size[3] = 128;  /* x-tile size        (t8 blocks of 128)     */
  tile_size[4] = -1;   /* list terminator */

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;  /* center-point weight  */
  const double beta = 0.0765;   /* neighbor-sum weight  */

  // initialize variables
  //
  /* NOTE(review): loops start at 1, so plane 0 / row 0 / column 0 of A[0]
   * are never initialized, yet the stencil below reads boundary index 0
   * (e.g. A[t5%2][(-t5+t6)-1][..][..] with -t5+t6 == 1). Confirm whether the
   * halo is meant to be zeroed. srand(42) is also commented out, so runs
   * use the default rand() seed. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
/* We do not support C11 <threads.h>.
 */
/* NOTE(review): the license text above is residue from a preprocessed glibc
   header, apparently left behind by the source-to-source (PLUTO/CLooG)
   pipeline — it does not affect compilation. */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
/* Auto-generated diamond/parallelogram-tiled time-skewed loop nest.
 * t1/t2 iterate wavefronts (t2 parallelized across threads), t3/t4 tile
 * y/x, t5 is time, t6/t7/t8 are z/y/x shifted by -t5 (time skewing);
 * A[(t5+1)%2] is written from A[t5%2] (double buffering). Bounds are
 * machine-generated — do not hand-edit. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) {
        for (t4=max(max(max(0,ceild(t1-31,32)),ceild(8*t2-Nz-124,128)),ceild(4*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t3+Nx,128),floord(Nt+Nx-4,128)),floord(4*t1+Nx+5,128)),floord(8*t2+Nx+4,128)),floord(8*t1-8*t2+Nz+Nx+3,128));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),128*t4+126),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                lbv=max(128*t4,t5+1);
                ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 7-point Jacobi update: out = alpha*center + beta*sum(6 face neighbors) */
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* NOTE(review): frees intentionally disabled (see comment above); the grid
     and tile_size leak, relying on process exit for cleanup. */
/*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
*/
  return 0;
}
GB_unaryop__identity_uint8_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint8_uint16
// op(A') function:  GB_tran__identity_uint8_uint16

// C type:   uint8_t
// A type:   uint16_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint16_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// address of the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: the cast below does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting (uint16_t -> uint8_t, truncating to the low 8 bits per C rules)
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij)): read, typecast, apply, store — one entry
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (x, aij) ;   \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over the dense value arrays: Cx [p] = (uint8_t) Ax [p]
// for p in [0, anz).  Iterations are independent, so a static parallel-for
// over nthreads is used.  Returns GrB_NO_VALUE when the operator is compiled
// out via GB_DISABLE (caller falls back to the generic kernel).
GrB_Info GB_unop__identity_uint8_uint16
(
    uint8_t *restrict Cx,        // output values, length anz
    const uint16_t *restrict Ax, // input values, length anz
    int64_t anz,                 // number of entries
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose+typecast+apply.  The actual loop body lives in the shared
// template GB_unaryop_transpose.c, instantiated here with the macros above
// (GB_PHASE_2_OF_2 selects the numeric phase of the two-phase transpose).
GrB_Info GB_tran__identity_uint8_uint16
(
    GrB_Matrix C,                    // output matrix (C = A')
    const GrB_Matrix A,              // input matrix
    int64_t **Rowcounts,             // per-slice row counts from phase 1
    GBI_single_iterator Iter,        // iterator over A's vectors
    const int64_t *restrict A_slice, // slice boundaries of A
    int naslice                      // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cg.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - CG This benchmark is an OpenMP C version of the NPB CG code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: M. Yarrow C. Kuszmaul OpenMP C version: S. Satoh --------------------------------------------------------------------*/ /* c--------------------------------------------------------------------- c Note: please observe that in the routine conj_grad three c implementations of the sparse matrix-vector multiply have c been supplied. The default matrix-vector multiply is not c loop unrolled. The alternate implementations are unrolled c to a depth of 2 and unrolled to a depth of 8. Please c experiment with these to find the fastest for your particular c architecture. If reporting timing results, any of these three may c be used without penalty. 
c--------------------------------------------------------------------- */ #include "npb-C.h" #include "npbparams.h" #undef NZ #define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2) /* global variables */ /* common /partit_size/ */ static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; /* common /main_int_mem/ */ static int colidx[NZ+1]; /* colidx[1:NZ] */ static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */ static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */ static int arow[NZ+1]; /* arow[1:NZ] */ static int acol[NZ+1]; /* acol[1:NZ] */ /* common /main_flt_mem/ */ static double v[NA+1+1]; /* v[1:NA+1] */ static double aelt[NZ+1]; /* aelt[1:NZ] */ static double a[NZ+1]; /* a[1:NZ] */ static double x[NA+2+1]; /* x[1:NA+2] */ static double z[NA+2+1]; /* z[1:NA+2] */ static double p[NA+2+1]; /* p[1:NA+2] */ static double q[NA+2+1]; /* q[1:NA+2] */ static double r[NA+2+1]; /* r[1:NA+2] */ static double w[NA+2+1]; /* w[1:NA+2] */ /* common /urando/ */ static double amult; static double tran; /* function declarations */ static void conj_grad (int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double w[], double *rnorm); static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], int acol[], double aelt[], double v[], int iv[], double shift ); static void sparse(double a[], int colidx[], int rowstr[], int n, int arow[], int acol[], double aelt[], int firstrow, int lastrow, double x[], boolean mark[], int nzloc[], int nnza); static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[], int mark[]); static int icnvrt(double x, int ipwr2); static void vecset(int n, double v[], int iv[], int *nzv, int i, double val); /*-------------------------------------------------------------------- program cg --------------------------------------------------------------------*/ int 
main(int argc, char **argv) { int i, j, k, it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t, mflops; char class; boolean verified; double zeta_verify_value, epsilon; if (argc != 2) { /* Print usage */ printf("Usage: %s <Number of threads>\n", argv[0]); abort(); } #ifdef BOMP bomp_bomp_init(atoi(argv[1])); #endif printf("Benchmark start"); omp_set_num_threads(atoi(argv[1])); firstrow = 1; lastrow = NA; firstcol = 1; lastcol = NA; if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) { class = 'S'; zeta_verify_value = 8.5971775078648; } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) { class = 'W'; zeta_verify_value = 10.362595087124; } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) { class = 'A'; zeta_verify_value = 17.130235054029; } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) { class = 'B'; zeta_verify_value = 22.712745482631; } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) { class = 'C'; zeta_verify_value = 28.973605592845; } else { class = 'U'; } printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - CG Benchmark\n"); printf(" Size: %10d\n", NA); printf(" Iterations: %5d\n", NITER); naa = NA; nzz = NZ; /*-------------------------------------------------------------------- c Initialize random number generator c-------------------------------------------------------------------*/ tran = 314159265.0; amult = 1220703125.0; zeta = randlc( &tran, amult ); /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ makea(naa, nzz, a, colidx, rowstr, NONZER, firstrow, lastrow, firstcol, lastcol, RCOND, arow, acol, aelt, v, iv, SHIFT); /*--------------------------------------------------------------------- c Note: as a result of the above call to makea: c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1 c 
values of colidx which are col indexes go from firstcol --> lastcol c So: c Shift the col index vals from actual (firstcol --> lastcol ) c to local, i.e., (1 --> lastcol-firstcol+1) c---------------------------------------------------------------------*/ #pragma omp parallel private(it,i,j,k) { #pragma omp for nowait for (j = 1; j <= lastrow - firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j+1]; k++) { colidx[k] = colidx[k] - firstcol + 1; } } /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ #pragma omp for nowait for (i = 1; i <= NA+1; i++) { x[i] = 1.0; } #pragma omp single zeta = 0.0; /*------------------------------------------------------------------- c----> c Do one iteration untimed to init all code and data page tables c----> (then reinit, start timing, to niter its) c-------------------------------------------------------------------*/ for (it = 1; it <= 1; it++) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ conj_grad (colidx, rowstr, x, z, a, p, q, r, w, &rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ #pragma omp single { norm_temp11 = 0.0; norm_temp12 = 0.0; } /* end single */ #pragma omp for reduction(+:norm_temp11,norm_temp12) for (j = 1; j <= lastcol-firstcol+1; j++) { norm_temp11 = norm_temp11 + x[j]*z[j]; norm_temp12 = norm_temp12 + z[j]*z[j]; } #pragma omp single norm_temp12 = 1.0 / sqrt( norm_temp12 ); /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ #pragma omp for 
for (j = 1; j <= lastcol-firstcol+1; j++) { x[j] = norm_temp12*z[j]; } } /* end of do one iteration untimed */ /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ #pragma omp for nowait for (i = 1; i <= NA+1; i++) { x[i] = 1.0; } #pragma omp single zeta = 0.0; } /* end parallel */ timer_clear( 1 ); timer_start( 1 ); /*-------------------------------------------------------------------- c----> c Main Iteration for inverse power method c----> c-------------------------------------------------------------------*/ #pragma omp parallel private(it,i,j,k) { for (it = 1; it <= NITER; it++) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ conj_grad(colidx, rowstr, x, z, a, p, q, r, w, &rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ #pragma omp single { norm_temp11 = 0.0; norm_temp12 = 0.0; } /* end single */ #pragma omp for reduction(+:norm_temp11,norm_temp12) for (j = 1; j <= lastcol-firstcol+1; j++) { norm_temp11 = norm_temp11 + x[j]*z[j]; norm_temp12 = norm_temp12 + z[j]*z[j]; } #pragma omp single { norm_temp12 = 1.0 / sqrt( norm_temp12 ); zeta = SHIFT + 1.0 / norm_temp11; } /* end single */ #pragma omp master { /* if( it == 1 ) { */ /* printf(" iteration ||r|| zeta\n"); */ /* } */ /* printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta); */ } /* end master */ /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { x[j] = norm_temp12*z[j]; } } /* end of 
main iter inv pow meth */ #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop( 1 ); /*-------------------------------------------------------------------- c End of timed section c-------------------------------------------------------------------*/ t = timer_read( 1 ); printf(" Benchmark completed\n"); epsilon = 1.0e-10; if (class != 'U') { if (fabs(zeta - zeta_verify_value) <= epsilon) { verified = TRUE; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n", zeta); printf(" Error is %20.12e\n", zeta - zeta_verify_value); } else { verified = FALSE; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n", zeta); printf(" The correct zeta is %20.12e\n", zeta_verify_value); } } else { verified = FALSE; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if ( t != 0.0 ) { mflops = (2.0*NITER*NA) * (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 ) / t / 1000000.0; } else { mflops = 0.0; } #ifdef BOMP //backend_create_time(atoi(argv[1])); #endif printf("Computetime %d %f\n", atoi(argv[1]), t); printf("client done\n"); /* c_print_results("CG", class, NA, 0, 0, NITER, nthreads, t, */ /* mflops, " floating point", */ /* verified, NPBVERSION, COMPILETIME, */ /* CS1, CS2, CS3, CS4, CS5, CS6, CS7); */ } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void conj_grad ( int colidx[], /* colidx[1:nzz] */ int rowstr[], /* rowstr[1:naa+1] */ double x[], /* x[*] */ double z[], /* z[*] */ double a[], /* a[1:nzz] */ double p[], /* p[*] */ double q[], /* q[*] */ double r[], /* r[*] */ double w[], /* w[*] */ double *rnorm ) /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*--------------------------------------------------------------------- c Floaging point arrays 
here are named as in NPB1 spec discussion of c CG algorithm c---------------------------------------------------------------------*/ { static double d, sum, rho, rho0, alpha, beta; int i, j, k; int cgit, cgitmax = 25; #pragma omp single nowait rho = 0.0; /*-------------------------------------------------------------------- c Initialize the CG algorithm: c-------------------------------------------------------------------*/ #pragma omp for nowait for (j = 1; j <= naa+1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; w[j] = 0.0; } /*-------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... c-------------------------------------------------------------------*/ #pragma omp for reduction(+:rho) for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + x[j]*x[j]; } /*-------------------------------------------------------------------- c----> c The conj grad iteration loop c----> c-------------------------------------------------------------------*/ for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp single nowait { rho0 = rho; d = 0.0; rho = 0.0; } /* end single */ /*-------------------------------------------------------------------- c q = A.p c The partition submatrix-vector multiply: use workspace w c--------------------------------------------------------------------- C C NOTE: this version of the multiply is actually (slightly: maybe %5) C faster on the sp2 on 16 nodes than is the unrolled-by-2 version C below. On the Cray t3d, the reverse is true, i.e., the C unrolled-by-two version is some 10% faster. C The unrolled-by-8 version below is significantly faster C on the Cray t3d - overall speed of code is 1.5 times faster. 
*/ /* rolled version */ #pragma omp for private(sum,k) for (j = 1; j <= lastrow-firstrow+1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j+1]; k++) { sum = sum + a[k]*p[colidx[k]]; } w[j] = sum; } /* unrolled-by-two version #pragma omp for private(i,k) for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; double sum1, sum2; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 2; sum1 = 0.0; sum2 = 0.0; if (iresidue == 1) sum1 = sum1 + a[i]*p[colidx[i]]; for (k = i+iresidue; k <= rowstr[j+1]-2; k += 2) { sum1 = sum1 + a[k] * p[colidx[k]]; sum2 = sum2 + a[k+1] * p[colidx[k+1]]; } w[j] = sum1 + sum2; } */ /* unrolled-by-8 version #pragma omp for private(i,k,sum) for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 8; sum = 0.0; for (k = i; k <= i+iresidue-1; k++) { sum = sum + a[k] * p[colidx[k]]; } for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) { sum = sum + a[k ] * p[colidx[k ]] + a[k+1] * p[colidx[k+1]] + a[k+2] * p[colidx[k+2]] + a[k+3] * p[colidx[k+3]] + a[k+4] * p[colidx[k+4]] + a[k+5] * p[colidx[k+5]] + a[k+6] * p[colidx[k+6]] + a[k+7] * p[colidx[k+7]]; } w[j] = sum; } */ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { q[j] = w[j]; } /*-------------------------------------------------------------------- c Clear w for reuse... 
c-------------------------------------------------------------------*/ #pragma omp for nowait for (j = 1; j <= lastcol-firstcol+1; j++) { w[j] = 0.0; } /*-------------------------------------------------------------------- c Obtain p.q c-------------------------------------------------------------------*/ #pragma omp for reduction(+:d) for (j = 1; j <= lastcol-firstcol+1; j++) { d = d + p[j]*q[j]; } /*-------------------------------------------------------------------- c Obtain alpha = rho / (p.q) c-------------------------------------------------------------------*/ #pragma omp single alpha = rho0 / d; /*-------------------------------------------------------------------- c Save a temporary of rho c-------------------------------------------------------------------*/ /* rho0 = rho;*/ /*--------------------------------------------------------------------- c Obtain z = z + alpha*p c and r = r - alpha*q c---------------------------------------------------------------------*/ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { z[j] = z[j] + alpha*p[j]; r[j] = r[j] - alpha*q[j]; } /*--------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... 
c---------------------------------------------------------------------*/ #pragma omp for reduction(+:rho) for (j = 1; j <= lastcol-firstcol+1; j++) { rho = rho + r[j]*r[j]; } /*-------------------------------------------------------------------- c Obtain beta: c-------------------------------------------------------------------*/ #pragma omp single beta = rho / rho0; /*-------------------------------------------------------------------- c p = r + beta*p c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { p[j] = r[j] + beta*p[j]; } } /* end of do cgit=1,cgitmax */ /*--------------------------------------------------------------------- c Compute residual norm explicitly: ||r|| = ||x - A.z|| c First, form A.z c The partition submatrix-vector multiply c---------------------------------------------------------------------*/ #pragma omp single nowait sum = 0.0; #pragma omp for private(d, k) for (j = 1; j <= lastrow-firstrow+1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) { d = d + a[k]*z[colidx[k]]; } w[j] = d; } #pragma omp for for (j = 1; j <= lastcol-firstcol+1; j++) { r[j] = w[j]; } /*-------------------------------------------------------------------- c At this point, r contains A.z c-------------------------------------------------------------------*/ #pragma omp for reduction(+:sum) private(d) for (j = 1; j <= lastcol-firstcol+1; j++) { d = x[j] - r[j]; sum = sum + d*d; } #pragma omp single { (*rnorm) = sqrt(sum); } /* end single */ } /*--------------------------------------------------------------------- c generate the test problem for benchmark 6 c makea generates a sparse matrix with a c prescribed sparsity distribution c c parameter type usage c c input c c n i number of cols/rows of matrix c nz i nonzeros as declared array size c rcond r*8 condition number c shift r*8 main diagonal shift c c output c c a r*8 array for nonzeros c colidx i col indices c rowstr i row 
pointers c c workspace c c iv, arow, acol i c v, aelt r*8 c---------------------------------------------------------------------*/ static void makea( int n, int nz, double a[], /* a[1:nz] */ int colidx[], /* colidx[1:nz] */ int rowstr[], /* rowstr[1:n+1] */ int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], /* arow[1:nz] */ int acol[], /* acol[1:nz] */ double aelt[], /* aelt[1:nz] */ double v[], /* v[1:n+1] */ int iv[], /* iv[1:2*n+1] */ double shift ) { int i, nnza, iouter, ivelt, ivelt1, irow, nzv; /*-------------------------------------------------------------------- c nonzer is approximately (int(sqrt(nnza /n))); c-------------------------------------------------------------------*/ double size, ratio, scale; int jcol; size = 1.0; ratio = pow(rcond, (1.0 / (double)n)); nnza = 0; /*--------------------------------------------------------------------- c Initialize colidx(n+1 .. 2n) to zero. c Used by sprnvc to mark nonzero positions c---------------------------------------------------------------------*/ #pragma omp parallel for for (i = 1; i <= n; i++) { colidx[n+i] = 0; } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n])); vecset(n, v, iv, &nzv, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; if (jcol >= firstcol && jcol <= lastcol) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; if (irow >= firstrow && irow <= lastrow) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } /*--------------------------------------------------------------------- c ... 
add the identity * rcond to the generated matrix to bound c the smallest eigenvalue from below by rcond c---------------------------------------------------------------------*/ for (i = firstrow; i <= lastrow; i++) { if (i >= firstcol && i <= lastcol) { iouter = n + i; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i; arow[nnza] = i; aelt[nnza] = rcond - shift; } } /*--------------------------------------------------------------------- c ... make the sparse matrix from list of elements with duplicates c (v and iv are used as workspace) c---------------------------------------------------------------------*/ sparse(a, colidx, rowstr, n, arow, acol, aelt, firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza); } /*--------------------------------------------------- c generate a sparse matrix from a list of c [col, row, element] tri c---------------------------------------------------*/ static void sparse( double a[], /* a[1:*] */ int colidx[], /* colidx[1:*] */ int rowstr[], /* rowstr[1:*] */ int n, int arow[], /* arow[1:*] */ int acol[], /* acol[1:*] */ double aelt[], /* aelt[1:*] */ int firstrow, int lastrow, double x[], /* x[1:n] */ boolean mark[], /* mark[1:n] */ int nzloc[], /* nzloc[1:n] */ int nnza) /*--------------------------------------------------------------------- c rows range from firstrow to lastrow c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values c---------------------------------------------------------------------*/ { int nrows; int i, j, jajp1, nza, k, nzrow; double xi; /*-------------------------------------------------------------------- c how many rows of result c-------------------------------------------------------------------*/ nrows = lastrow - firstrow + 1; /*-------------------------------------------------------------------- c ...count the number of triples in each row 
c-------------------------------------------------------------------*/ #pragma omp parallel for for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = FALSE; } rowstr[n+1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows+1; j++) { rowstr[j] = rowstr[j] + rowstr[j-1]; } /*--------------------------------------------------------------------- c ... rowstr(j) now is the location of the first nonzero c of row j of a c---------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c ... do a bucket sort of the triples on the row index c-------------------------------------------------------------------*/ for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } /*-------------------------------------------------------------------- c ... rowstr(j) now points to the first element of row j+1 c-------------------------------------------------------------------*/ for (j = nrows; j >= 1; j--) { rowstr[j+1] = rowstr[j]; } rowstr[1] = 1; /*-------------------------------------------------------------------- c ... generate the actual output rows by adding elements c-------------------------------------------------------------------*/ nza = 0; #pragma omp parallel for for (i = 1; i <= n; i++) { x[i] = 0.0; mark[i] = FALSE; } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; /*-------------------------------------------------------------------- c ...loop over the jth row of a c-------------------------------------------------------------------*/ for (k = jajp1; k < rowstr[j+1]; k++) { i = colidx[k]; x[i] = x[i] + a[k]; if ( mark[i] == FALSE && x[i] != 0.0) { mark[i] = TRUE; nzrow = nzrow + 1; nzloc[nzrow] = i; } } /*-------------------------------------------------------------------- c ... 
extract the nonzeros of this row c-------------------------------------------------------------------*/ for (k = 1; k <= nzrow; k++) { i = nzloc[k]; mark[i] = FALSE; xi = x[i]; x[i] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i; } } jajp1 = rowstr[j+1]; rowstr[j+1] = nza + rowstr[1]; } } /*--------------------------------------------------------------------- c generate a sparse n-vector (v, iv) c having nzv nonzeros c c mark(i) is set to 1 if position i is nonzero. c mark is all zero on entry and is reset to all zero before exit c this corrects a performance bug found by John G. Lewis, caused by c reinitialization of mark on every one of the n calls to sprnvc ---------------------------------------------------------------------*/ static void sprnvc( int n, int nz, double v[], /* v[1:*] */ int iv[], /* iv[1:*] */ int nzloc[], /* nzloc[1:n] */ int mark[] ) /* mark[1:n] */ { int nn1; int nzrow, nzv, ii, i; double vecelt, vecloc; nzv = 0; nzrow = 0; nn1 = 1; do { nn1 = 2 * nn1; } while (nn1 < n); /*-------------------------------------------------------------------- c nn1 is the smallest power of two not less than n c-------------------------------------------------------------------*/ while (nzv < nz) { vecelt = randlc(&tran, amult); /*-------------------------------------------------------------------- c generate an integer between 1 and n in a portable manner c-------------------------------------------------------------------*/ vecloc = randlc(&tran, amult); i = icnvrt(vecloc, nn1) + 1; if (i > n) continue; /*-------------------------------------------------------------------- c was this integer generated already? 
c-------------------------------------------------------------------*/ if (mark[i] == 0) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; nzv = nzv + 1; v[nzv] = vecelt; iv[nzv] = i; } } for (ii = 1; ii <= nzrow; ii++) { i = nzloc[ii]; mark[i] = 0; } } /*--------------------------------------------------------------------- * scale a double precision number x in (0,1) by a power of 2 and chop it *---------------------------------------------------------------------*/ static int icnvrt(double x, int ipwr2) { return ((int)(ipwr2 * x)); } /*-------------------------------------------------------------------- c set ith element of sparse vector (v, iv) with c nzv nonzeros to val c-------------------------------------------------------------------*/ static void vecset( int n, double v[], /* v[1:*] */ int iv[], /* iv[1:*] */ int *nzv, int i, double val) { int k; boolean set; set = FALSE; for (k = 1; k <= *nzv; k++) { if (iv[k] == i) { v[k] = val; set = TRUE; } } if (set == FALSE) { *nzv = *nzv + 1; v[*nzv] = val; iv[*nzv] = i; } }
SplineC2RAdoptor.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign // Jeongnim Kim, jeongnim.kim@intel.com, University of Illinois at Urbana-Champaign // Ye Luo, yeluo@anl.gov, Argonne National Laboratory // Anouar Benali, benali@anl.gov, Argonne National Laboratory // Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory // // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign ////////////////////////////////////////////////////////////////////////////////////// /** @file SplineC2RSoA.h * * Adoptor classes to handle complex-to-(real,complex) with arbitrary precision */ #ifndef QMCPLUSPLUS_EINSPLINE_C2R_ADOPTOR_H #define QMCPLUSPLUS_EINSPLINE_C2R_ADOPTOR_H #include <OhmmsSoA/Container.h> #include <spline2/MultiBspline.hpp> #include <spline2/MultiBsplineEval.hpp> #include "QMCWaveFunctions/BsplineFactory/SplineAdoptorBase.h" #include <Utilities/FairDivide.h> namespace qmcplusplus { /** adoptor class to match std::complex<ST> spline with TT real SPOs * @tparam ST precision of spline * @tparam TT precision of SPOs * @tparam D dimension * * Requires temporage storage and multiplication of phase vectors * Internal storage use double sized arrays of ST type, aligned and padded. 
*/
/** Adoptor mapping a complex-valued (ST) multi-bspline table onto real-valued
 * (TT) single-particle orbitals.  Each complex band is stored as an
 * interleaved (real, imaginary) pair in the spline table, so spline storage
 * is twice the number of orbitals.  Bands at complex k-points produce TWO
 * real orbitals each (the first nComplexBands entries); the remainder
 * produce one.  The evaluate_* routines multiply by the phase factor
 * exp(-i k.r) on the fly.
 */
template<typename ST, typename TT>
struct SplineC2RSoA: public SplineAdoptorBase<ST,3>
{
  static const int D=3;
  using BaseType=SplineAdoptorBase<ST,3>;
  using SplineType=typename bspline_traits<ST,3>::SplineType;
  using BCType=typename bspline_traits<ST,3>::BCType;
  using DataType=ST;
  using PointType=typename BaseType::PointType;
  using SingleSplineType=typename BaseType::SingleSplineType;

  // aligned value / gradient (3) / hessian (6, symmetric) containers
  using vContainer_type=Vector<ST,aligned_allocator<ST> >;
  using gContainer_type=VectorSoaContainer<ST,3>;
  using hContainer_type=VectorSoaContainer<ST,6>;

  using BaseType::first_spo;
  using BaseType::last_spo;
  using BaseType::GGt;
  using BaseType::PrimLattice;
  using BaseType::kPoints;
  using BaseType::MakeTwoCopies;
  using BaseType::offset;

  ///number of complex bands (each yields two real orbitals)
  int nComplexBands;
  ///number of points of the original grid
  int BaseN[3];
  ///offset of the original grid, always 0
  int BaseOffset[3];
  ///multi bspline set
  MultiBspline<ST>* SplineInst;
  ///expose the pointer to reuse the reader and only assigned with create_spline
  ///also used as identifier of shallow copy
  SplineType* MultiSpline;

  ///-|k|^2 per band, used to assemble the laplacian
  vContainer_type mKK;
  ///Cartesian k-vectors in SoA layout
  VectorSoaContainer<ST,3> myKcart;

  // per-walker scratch: values / laplacians / gradients / hessians
  // (interleaved re/im, hence sized 2*n and padded to alignment)
  vContainer_type myV;
  vContainer_type myL;
  gContainer_type myG;
  hContainer_type myH;

  SplineC2RSoA(): BaseType(), nComplexBands(0), SplineInst(nullptr), MultiSpline(nullptr)
  {
    this->is_complex=true;
    this->is_soa_ready=true;
    this->AdoptorName="SplineC2RSoAAdoptor";
    this->KeyWord="SplineC2RSoA";
  }

  /** Shallow copy: the spline table pointer is shared, but MultiSpline is
   * left null so the copy is identifiable and the destructor does not
   * double-free.  Scratch buffers get fresh storage of matching size. */
  SplineC2RSoA(const SplineC2RSoA& a):
    SplineAdoptorBase<ST,3>(a),SplineInst(a.SplineInst),MultiSpline(nullptr),
    nComplexBands(a.nComplexBands),mKK(a.mKK), myKcart(a.myKcart)
  {
    const size_t n=a.myL.size();
    myV.resize(n); myG.resize(n); myL.resize(n); myH.resize(n);
  }

  ~SplineC2RSoA()
  {
    // only the original owner (MultiSpline non-null) frees the table
    if(MultiSpline != nullptr) delete SplineInst;
  }

  /** allocate scratch for n orbitals; 2*n spline values (re/im interleaved),
   * padded to the SIMD alignment of ST */
  inline void resizeStorage(size_t n, size_t nvals)
  {
    BaseType::init_base(n);
    size_t npad=getAlignedSize<ST>(2*n);
    myV.resize(npad);
    myG.resize(npad);
    myL.resize(npad);
    myH.resize(npad);
  }

  ///broadcast the spline table in chunks across the communicator
  void bcast_tables(Communicate* comm)
  {
    chunked_bcast(comm, MultiSpline);
  }

  /** gather band groups from all ranks into the full table; offsets are
   * doubled because each band occupies two (re/im) spline slots */
  void gather_tables(Communicate* comm)
  {
    if(comm->size()==1) return;
    const int Nbands = kPoints.size();
    const int Nbandgroups = comm->size();
    offset.resize(Nbandgroups+1,0);
    FairDivideLow(Nbands,Nbandgroups,offset);
    for(size_t ib=0; ib<offset.size(); ib++)
      offset[ib] = offset[ib]*2;
    gatherv(comm, MultiSpline, MultiSpline->z_stride, offset);
  }

  /** create the multi-bspline table for myV.size() splines on grid xyz_g
   * with boundary conditions xyz_bc, and record the original grid extents */
  template<typename GT, typename BCT>
  void create_spline(GT& xyz_g, BCT& xyz_bc)
  {
    resize_kpoints();
    SplineInst=new MultiBspline<ST>();
    SplineInst->create(xyz_g,xyz_bc,myV.size());
    MultiSpline=SplineInst->spline_m;
    for(size_t i=0; i<D; ++i)
    {
      BaseOffset[i]=0;
      BaseN[i]=xyz_g[i].num+3;  // +3: cubic b-spline ghost points -- TODO confirm
    }
    qmc_common.memory_allocated += SplineInst->sizeInByte();
  }

  inline void flush_zero()
  {
    SplineInst->flush_zero();
  }

  /** remap kPoints to pack the double copy */
  inline void resize_kpoints()
  {
#ifndef QMC_CUDA
    // GPU CUDA code doesn't allow a change of the ordering
    nComplexBands=this->remap_kpoints();
#endif
    int nk=kPoints.size();
    mKK.resize(nk);
    myKcart.resize(nk);
    for(size_t i=0; i<nk; ++i)
    {
      mKK[i]=-dot(kPoints[i],kPoints[i]);  // -|k|^2, consumed by the laplacian
      myKcart(i)=kPoints[i];
    }
  }

  /** copy one complex band (real and imaginary single splines) into the
   * interleaved slots 2*ispline and 2*ispline+1 of the multi table */
  inline void set_spline(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, int level)
  {
    SplineInst->copy_spline(spline_r,2*ispline  ,BaseOffset, BaseN);
    SplineInst->copy_spline(spline_i,2*ispline+1,BaseOffset, BaseN);
  }

  ///same as above but from raw coefficient arrays
  void set_spline(ST* restrict psi_r, ST* restrict psi_i, int twist, int ispline, int level)
  {
    Vector<ST> v_r(psi_r,0), v_i(psi_i,0);
    SplineInst->set(2*ispline  ,v_r);
    SplineInst->set(2*ispline+1,v_i);
  }

  ///domain-restricted variant: intentionally a no-op for this adoptor
  inline void set_spline_domain(SingleSplineType* spline_r, SingleSplineType* spline_i,
      int twist, int ispline, const int* offset_l, const int* mesh_l)
  {
  }

  ///read the whole coefficient table from HDF5 under "spline_<MyIndex>"
  bool read_splines(hdf_archive& h5f)
  {
    std::ostringstream o;
    o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex;
    einspline_engine<SplineType> bigtable(SplineInst->spline_m);
    return h5f.read(bigtable,o.str().c_str());//"spline_0");
  }

  ///write the whole coefficient table to HDF5 under "spline_<MyIndex>"
  bool write_splines(hdf_archive& h5f)
  {
    std::ostringstream o;
    o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex;
    einspline_engine<SplineType> bigtable(SplineInst->spline_m);
    return h5f.write(bigtable,o.str().c_str());//"spline_0");
  }

  /** convert interleaved spline values myV into real orbitals psi by applying
   * the phase exp(-i k.r); [first,last) selects a band range for threading.
   * Complex bands write two outputs (re at jr, im at ji); the remaining
   * bands keep only the real part. */
  template<typename VV>
  inline void assign_v(const PointType& r, const vContainer_type& myV, VV& psi, int first = 0, int last = -1) const
  {
    // protect last
    last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last);

    const ST x=r[0], y=r[1], z=r[2];
    const ST* restrict kx=myKcart.data(0);
    const ST* restrict ky=myKcart.data(1);
    const ST* restrict kz=myKcart.data(2);

    TT* restrict psi_s=psi.data()+first_spo;
    #pragma omp simd
    for (size_t j=first; j<std::min(nComplexBands,last); j++)
    {
      ST s, c;
      const size_t jr=j<<1;
      const size_t ji=jr+1;
      const ST val_r=myV[jr];
      const ST val_i=myV[ji];
      sincos(-(x*kx[j]+y*ky[j]+z*kz[j]),&s,&c);
      psi_s[jr] = val_r*c-val_i*s;
      psi_s[ji] = val_i*c+val_r*s;
    }

    // real-only bands start after the doubled complex block:
    // advancing by nComplexBands makes psi_s[j] land at
    // first_spo + nComplexBands + j, i.e. right past 2*nComplexBands slots
    psi_s += nComplexBands;
    #pragma omp simd
    for (size_t j=std::max(nComplexBands,first); j<last; j++)
    {
      ST s, c;
      const ST val_r=myV[2*j  ];
      const ST val_i=myV[2*j+1];
      sincos(-(x*kx[j]+y*ky[j]+z*kz[j]),&s,&c);
      psi_s[j] = val_r*c-val_i*s;
    }
  }

  /** evaluate orbital values at particle iat's active position; spline
   * evaluation and phase application are split over OpenMP threads
   * (first/last are spline-slot ranges, hence the /2 for band ranges) */
  template<typename VV>
  inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi)
  {
    const PointType& r=P.activeR(iat);
    PointType ru(PrimLattice.toUnit_floor(r));
    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(myV.size(), getAlignment<ST>(),
                        omp_get_num_threads(),
                        omp_get_thread_num(),
                        first, last);

      spline2::evaluate3d(SplineInst->spline_m,ru,myV,first,last);
      assign_v(r,myV,psi,first/2,last/2);
    }
  }

  ///evaluate values for all virtual particles (e.g. nonlocal PP quadrature)
  template<typename VM, typename VAV>
  inline void evaluateValues(const VirtualParticleSet& VP, VM& psiM, VAV& SPOMem)
  {
    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(myV.size(), getAlignment<ST>(),
                        omp_get_num_threads(),
                        omp_get_thread_num(),
                        first, last);
      const size_t m=psiM.cols();
      for(int iat=0; iat<VP.getTotalNum(); ++iat)
      {
        const PointType& r=VP.activeR(iat);
        PointType ru(PrimLattice.toUnit_floor(r));
        Vector<TT> psi(psiM[iat],m);
        spline2::evaluate3d(SplineInst->spline_m,ru,myV,first,last);
        assign_v(r,myV,psi,first/2,last/2);
      }
    }
  }

  ///no extra shared memory needed by this adoptor
  inline size_t estimateMemory(const int nP) { return 0; }

  /** assign_vgl: values, gradients and laplacians with phase factors.
   * Spline derivatives are in lattice units; g?? (rows of PrimLattice.G)
   * convert to Cartesian, symGG contracts the hessian into the laplacian. */
  template<typename VV, typename GV>
  inline void assign_vgl(const PointType& r, VV& psi, GV& dpsi, VV& d2psi, int first = 0, int last = -1) const
  {
    // protect last
    last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last);

    constexpr ST two(2);
    const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2),
             g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5),
             g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8);
    const ST x=r[0], y=r[1], z=r[2];
    const ST symGG[6]={GGt[0],GGt[1]+GGt[3],GGt[2]+GGt[6],GGt[4],GGt[5]+GGt[7],GGt[8]};

    const ST* restrict k0=myKcart.data(0); ASSUME_ALIGNED(k0);
    const ST* restrict k1=myKcart.data(1); ASSUME_ALIGNED(k1);
    const ST* restrict k2=myKcart.data(2); ASSUME_ALIGNED(k2);

    const ST* restrict g0=myG.data(0); ASSUME_ALIGNED(g0);
    const ST* restrict g1=myG.data(1); ASSUME_ALIGNED(g1);
    const ST* restrict g2=myG.data(2); ASSUME_ALIGNED(g2);
    const ST* restrict h00=myH.data(0); ASSUME_ALIGNED(h00);
    const ST* restrict h01=myH.data(1); ASSUME_ALIGNED(h01);
    const ST* restrict h02=myH.data(2); ASSUME_ALIGNED(h02);
    const ST* restrict h11=myH.data(3); ASSUME_ALIGNED(h11);
    const ST* restrict h12=myH.data(4); ASSUME_ALIGNED(h12);
    const ST* restrict h22=myH.data(5); ASSUME_ALIGNED(h22);

    #pragma omp simd
    for (size_t j=first; j<std::min(nComplexBands,last); j++)
    {
      const size_t jr=j<<1;
      const size_t ji=jr+1;

      const ST kX=k0[j];
      const ST kY=k1[j];
      const ST kZ=k2[j];
      const ST val_r=myV[jr];
      const ST val_i=myV[ji];

      //phase
      ST s, c;
      sincos(-(x*kX+y*kY+z*kZ),&s,&c);

      //dot(PrimLattice.G,myG[j])
      const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr];
      const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr];
      const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr];

      const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji];
      const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji];
      const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji];

      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r=dX_r+val_i*kX;
      const ST gY_r=dY_r+val_i*kY;
      const ST gZ_r=dZ_r+val_i*kZ;
      const ST gX_i=dX_i-val_r*kX;
      const ST gY_i=dY_i-val_r*kY;
      const ST gZ_i=dZ_i-val_r*kZ;

      const ST lcart_r=SymTrace(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],symGG);
      const ST lcart_i=SymTrace(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],symGG);
      // laplacian picks up -|k|^2*val and the 2 k.grad cross terms
      const ST lap_r=lcart_r+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
      const ST lap_i=lcart_i+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);

      const size_t psiIndex=first_spo+jr;
      //this will be fixed later
      psi[psiIndex  ]=c*val_r-s*val_i;
      psi[psiIndex+1]=c*val_i+s*val_r;
      d2psi[psiIndex  ]=c*lap_r-s*lap_i;
      d2psi[psiIndex+1]=c*lap_i+s*lap_r;
      //this will go way with Determinant
      dpsi[psiIndex  ][0]=c*gX_r-s*gX_i;
      dpsi[psiIndex  ][1]=c*gY_r-s*gY_i;
      dpsi[psiIndex  ][2]=c*gZ_r-s*gZ_i;
      dpsi[psiIndex+1][0]=c*gX_i+s*gX_r;
      dpsi[psiIndex+1][1]=c*gY_i+s*gY_r;
      dpsi[psiIndex+1][2]=c*gZ_i+s*gZ_r;
    }

    #pragma omp simd
    for (size_t j=std::max(nComplexBands,first); j<last; j++)
    {
      const size_t jr=j<<1;
      const size_t ji=jr+1;

      const ST kX=k0[j];
      const ST kY=k1[j];
      const ST kZ=k2[j];
      const ST val_r=myV[jr];
      const ST val_i=myV[ji];

      //phase
      ST s, c;
      sincos(-(x*kX+y*kY+z*kZ),&s,&c);

      //dot(PrimLattice.G,myG[j])
      const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr];
      const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr];
      const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr];

      const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji];
      const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji];
      const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji];

      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r=dX_r+val_i*kX;
      const ST gY_r=dY_r+val_i*kY;
      const ST gZ_r=dZ_r+val_i*kZ;
      const ST gX_i=dX_i-val_r*kX;
      const ST gY_i=dY_i-val_r*kY;
      const ST gZ_i=dZ_i-val_r*kZ;

      // real-only band: a single output slot past the doubled complex block
      const size_t psiIndex=first_spo+nComplexBands+j;
      psi[psiIndex  ]=c*val_r-s*val_i;
      //this will be fixed later
      dpsi[psiIndex  ][0]=c*gX_r-s*gX_i;
      dpsi[psiIndex  ][1]=c*gY_r-s*gY_i;
      dpsi[psiIndex  ][2]=c*gZ_r-s*gZ_i;

      const ST lcart_r=SymTrace(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],symGG);
      const ST lcart_i=SymTrace(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],symGG);
      const ST lap_r=lcart_r+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
      const ST lap_i=lcart_i+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);
      d2psi[psiIndex  ]=c*lap_r-s*lap_i;
    }
  }

  /** assign_vgl_from_l can be used when myL is precomputed and myV,myG,myL in cartesian */
  template<typename VV, typename GV>
  inline void assign_vgl_from_l(const PointType& r, VV& psi, GV& dpsi, VV& d2psi)
  {
    constexpr ST two(2);
    const ST x=r[0], y=r[1], z=r[2];

    const ST* restrict k0=myKcart.data(0); ASSUME_ALIGNED(k0);
    const ST* restrict k1=myKcart.data(1); ASSUME_ALIGNED(k1);
    const ST* restrict k2=myKcart.data(2); ASSUME_ALIGNED(k2);

    const ST* restrict g0=myG.data(0); ASSUME_ALIGNED(g0);
    const ST* restrict g1=myG.data(1); ASSUME_ALIGNED(g1);
    const ST* restrict g2=myG.data(2); ASSUME_ALIGNED(g2);

    const size_t N=kPoints.size();
    #pragma omp simd
    for (size_t j=0; j<nComplexBands; j++)
    {
      const size_t jr=j<<1;
      const size_t ji=jr+1;

      const ST kX=k0[j];
      const ST kY=k1[j];
      const ST kZ=k2[j];
      const ST val_r=myV[jr];
      const ST val_i=myV[ji];

      //phase
      ST s, c;
      sincos(-(x*kX+y*kY+z*kZ),&s,&c);

      //dot(PrimLattice.G,myG[j]) -- already Cartesian here, no G multiply
      const ST dX_r = g0[jr];
      const ST dY_r = g1[jr];
      const ST dZ_r = g2[jr];

      const ST dX_i = g0[ji];
      const ST dY_i = g1[ji];
      const ST dZ_i = g2[ji];

      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r=dX_r+val_i*kX;
      const ST gY_r=dY_r+val_i*kY;
      const ST gZ_r=dZ_r+val_i*kZ;
      const ST gX_i=dX_i-val_r*kX;
      const ST gY_i=dY_i-val_r*kY;
      const ST gZ_i=dZ_i-val_r*kZ;

      const ST lap_r=myL[jr]+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
      const ST lap_i=myL[ji]+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);

      //this will be fixed later
      const size_t psiIndex=first_spo+jr;
      psi[psiIndex  ]=c*val_r-s*val_i;
      psi[psiIndex+1]=c*val_i+s*val_r;
      d2psi[psiIndex  ]=c*lap_r-s*lap_i;
      d2psi[psiIndex+1]=c*lap_i+s*lap_r;
      //this will go way with Determinant
      dpsi[psiIndex  ][0]=c*gX_r-s*gX_i;
      dpsi[psiIndex  ][1]=c*gY_r-s*gY_i;
      dpsi[psiIndex  ][2]=c*gZ_r-s*gZ_i;
      dpsi[psiIndex+1][0]=c*gX_i+s*gX_r;
      dpsi[psiIndex+1][1]=c*gY_i+s*gY_r;
      dpsi[psiIndex+1][2]=c*gZ_i+s*gZ_r;
    }

    #pragma omp simd
    for (size_t j=nComplexBands; j<N; j++)
    {
      const size_t jr=j<<1;
      const size_t ji=jr+1;

      const ST kX=k0[j];
      const ST kY=k1[j];
      const ST kZ=k2[j];
      const ST val_r=myV[jr];
      const ST val_i=myV[ji];

      //phase
      ST s, c;
      sincos(-(x*kX+y*kY+z*kZ),&s,&c);

      //dot(PrimLattice.G,myG[j])
      const ST dX_r = g0[jr];
      const ST dY_r = g1[jr];
      const ST dZ_r = g2[jr];

      const ST dX_i = g0[ji];
      const ST dY_i = g1[ji];
      const ST dZ_i = g2[ji];

      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r=dX_r+val_i*kX;
      const ST gY_r=dY_r+val_i*kY;
      const ST gZ_r=dZ_r+val_i*kZ;
      const ST gX_i=dX_i-val_r*kX;
      const ST gY_i=dY_i-val_r*kY;
      const ST gZ_i=dZ_i-val_r*kZ;

      const size_t psiIndex=first_spo+nComplexBands+j;
      psi[psiIndex  ]=c*val_r-s*val_i;
      //this will be fixed later
      dpsi[psiIndex  ][0]=c*gX_r-s*gX_i;
      dpsi[psiIndex  ][1]=c*gY_r-s*gY_i;
      dpsi[psiIndex  ][2]=c*gZ_r-s*gZ_i;

      const ST lap_r=myL[jr]+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i);
      const ST lap_i=myL[ji]+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r);
      d2psi[psiIndex  ]=c*lap_r-s*lap_i;
    }
  }

  ///evaluate value+gradient+laplacian at particle iat's active position
  template<typename VV, typename GV>
  inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi)
  {
    const PointType& r=P.activeR(iat);
    PointType ru(PrimLattice.toUnit_floor(r));

    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(myV.size(), getAlignment<ST>(),
                        omp_get_num_threads(),
                        omp_get_thread_num(),
                        first, last);

      spline2::evaluate3d_vgh(SplineInst->spline_m,ru,myV,myG,myH,first,last);
      assign_vgl(r,psi,dpsi,d2psi,first/2,last/2);
    }
  }

  /** values, gradients and full 3x3 hessians with phase factors; the
   * hessian rows are assembled via v_m_v contractions with PrimLattice.G */
  template<typename VV, typename GV, typename GGV>
  void assign_vgh(const PointType& r, VV& psi, GV& dpsi, GGV& grad_grad_psi, int first = 0, int last = -1) const
  {
    // protect last
    last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last);

    const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2),
             g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5),
             g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8);
    const ST x=r[0], y=r[1], z=r[2];

    const ST* restrict k0=myKcart.data(0);
    const ST* restrict k1=myKcart.data(1);
    const ST* restrict k2=myKcart.data(2);

    const ST* restrict g0=myG.data(0);
    const ST* restrict g1=myG.data(1);
    const ST* restrict g2=myG.data(2);
    const ST* restrict h00=myH.data(0);
    const ST* restrict h01=myH.data(1);
    const ST* restrict h02=myH.data(2);
    const ST* restrict h11=myH.data(3);
    const ST* restrict h12=myH.data(4);
    const ST* restrict h22=myH.data(5);

    #pragma omp simd
    for (size_t j=first; j<std::min(nComplexBands,last); j++)
    {
      int jr=j<<1;
      int ji=jr+1;

      const ST kX=k0[j];
      const ST kY=k1[j];
      const ST kZ=k2[j];
      const ST val_r=myV[jr];
      const ST val_i=myV[ji];

      //phase
      ST s, c;
      sincos(-(x*kX+y*kY+z*kZ),&s,&c);

      //dot(PrimLattice.G,myG[j])
      const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr];
      const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr];
      const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr];

      const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji];
      const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji];
      const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji];

      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r=dX_r+val_i*kX;
      const ST gY_r=dY_r+val_i*kY;
      const ST gZ_r=dZ_r+val_i*kZ;
      const ST gX_i=dX_i-val_r*kX;
      const ST gY_i=dY_i-val_r*kY;
      const ST gZ_i=dZ_i-val_r*kZ;

      const size_t psiIndex=first_spo+jr;
      psi[psiIndex]  =c*val_r-s*val_i;
      dpsi[psiIndex][0]  =c*gX_r-s*gX_i;
      dpsi[psiIndex][1]  =c*gY_r-s*gY_i;
      dpsi[psiIndex][2]  =c*gZ_r-s*gZ_i;

      psi[psiIndex+1]=c*val_i+s*val_r;
      dpsi[psiIndex+1][0]=c*gX_i+s*gX_r;
      dpsi[psiIndex+1][1]=c*gY_i+s*gY_r;
      dpsi[psiIndex+1][2]=c*gZ_i+s*gZ_r;

      // hessian in Cartesian frame + k-vector cross terms, real part
      const ST h_xx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g00,g01,g02)+kX*(gX_i+dX_i);
      const ST h_xy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g10,g11,g12)+kX*(gY_i+dY_i);
      const ST h_xz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g20,g21,g22)+kX*(gZ_i+dZ_i);
      const ST h_yx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g00,g01,g02)+kY*(gX_i+dX_i);
      const ST h_yy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g10,g11,g12)+kY*(gY_i+dY_i);
      const ST h_yz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g20,g21,g22)+kY*(gZ_i+dZ_i);
      const ST h_zx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g00,g01,g02)+kZ*(gX_i+dX_i);
      const ST h_zy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g10,g11,g12)+kZ*(gY_i+dY_i);
      const ST h_zz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g20,g21,g22)+kZ*(gZ_i+dZ_i);

      // imaginary part
      const ST h_xx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g00,g01,g02)-kX*(gX_r+dX_r);
      const ST h_xy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g10,g11,g12)-kX*(gY_r+dY_r);
      const ST h_xz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g20,g21,g22)-kX*(gZ_r+dZ_r);
      const ST h_yx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g00,g01,g02)-kY*(gX_r+dX_r);
      const ST h_yy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g10,g11,g12)-kY*(gY_r+dY_r);
      const ST h_yz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g20,g21,g22)-kY*(gZ_r+dZ_r);
      const ST h_zx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g00,g01,g02)-kZ*(gX_r+dX_r);
      const ST h_zy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g10,g11,g12)-kZ*(gY_r+dY_r);
      const ST h_zz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g20,g21,g22)-kZ*(gZ_r+dZ_r);

      grad_grad_psi[psiIndex][0]=c*h_xx_r-s*h_xx_i;
      grad_grad_psi[psiIndex][1]=c*h_xy_r-s*h_xy_i;
      grad_grad_psi[psiIndex][2]=c*h_xz_r-s*h_xz_i;
      grad_grad_psi[psiIndex][3]=c*h_yx_r-s*h_yx_i;
      grad_grad_psi[psiIndex][4]=c*h_yy_r-s*h_yy_i;
      grad_grad_psi[psiIndex][5]=c*h_yz_r-s*h_yz_i;
      grad_grad_psi[psiIndex][6]=c*h_zx_r-s*h_zx_i;
      grad_grad_psi[psiIndex][7]=c*h_zy_r-s*h_zy_i;
      grad_grad_psi[psiIndex][8]=c*h_zz_r-s*h_zz_i;

      grad_grad_psi[psiIndex+1][0]=c*h_xx_i+s*h_xx_r;
      grad_grad_psi[psiIndex+1][1]=c*h_xy_i+s*h_xy_r;
      grad_grad_psi[psiIndex+1][2]=c*h_xz_i+s*h_xz_r;
      grad_grad_psi[psiIndex+1][3]=c*h_yx_i+s*h_yx_r;
      grad_grad_psi[psiIndex+1][4]=c*h_yy_i+s*h_yy_r;
      grad_grad_psi[psiIndex+1][5]=c*h_yz_i+s*h_yz_r;
      grad_grad_psi[psiIndex+1][6]=c*h_zx_i+s*h_zx_r;
      grad_grad_psi[psiIndex+1][7]=c*h_zy_i+s*h_zy_r;
      grad_grad_psi[psiIndex+1][8]=c*h_zz_i+s*h_zz_r;
    }

    #pragma omp simd
    for (size_t j=std::max(nComplexBands,first); j<last; j++)
    {
      int jr=j<<1;
      int ji=jr+1;

      const ST kX=k0[j];
      const ST kY=k1[j];
      const ST kZ=k2[j];
      const ST val_r=myV[jr];
      const ST val_i=myV[ji];

      //phase
      ST s, c;
      sincos(-(x*kX+y*kY+z*kZ),&s,&c);

      //dot(PrimLattice.G,myG[j])
      const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr];
      const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr];
      const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr];

      const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji];
      const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji];
      const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji];

      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r=dX_r+val_i*kX;
      const ST gY_r=dY_r+val_i*kY;
      const ST gZ_r=dZ_r+val_i*kZ;
      const ST gX_i=dX_i-val_r*kX;
      const ST gY_i=dY_i-val_r*kY;
      const ST gZ_i=dZ_i-val_r*kZ;

      const size_t psiIndex=first_spo+nComplexBands+j;
      psi[psiIndex]  =c*val_r-s*val_i;
      dpsi[psiIndex][0]  =c*gX_r-s*gX_i;
      dpsi[psiIndex][1]  =c*gY_r-s*gY_i;
      dpsi[psiIndex][2]  =c*gZ_r-s*gZ_i;

      const ST h_xx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g00,g01,g02)+kX*(gX_i+dX_i);
      const ST h_xy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g10,g11,g12)+kX*(gY_i+dY_i);
      const ST h_xz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g20,g21,g22)+kX*(gZ_i+dZ_i);
      const ST h_yx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g00,g01,g02)+kY*(gX_i+dX_i);
      const ST h_yy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g10,g11,g12)+kY*(gY_i+dY_i);
      const ST h_yz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g20,g21,g22)+kY*(gZ_i+dZ_i);
      const ST h_zx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g00,g01,g02)+kZ*(gX_i+dX_i);
      const ST h_zy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g10,g11,g12)+kZ*(gY_i+dY_i);
      const ST h_zz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g20,g21,g22)+kZ*(gZ_i+dZ_i);

      const ST h_xx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g00,g01,g02)-kX*(gX_r+dX_r);
      const ST h_xy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g10,g11,g12)-kX*(gY_r+dY_r);
      const ST h_xz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g20,g21,g22)-kX*(gZ_r+dZ_r);
      const ST h_yx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g00,g01,g02)-kY*(gX_r+dX_r);
      const ST h_yy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g10,g11,g12)-kY*(gY_r+dY_r);
      const ST h_yz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g20,g21,g22)-kY*(gZ_r+dZ_r);
      const ST h_zx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g00,g01,g02)-kZ*(gX_r+dX_r);
      const ST h_zy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g10,g11,g12)-kZ*(gY_r+dY_r);
      const ST h_zz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g20,g21,g22)-kZ*(gZ_r+dZ_r);

      grad_grad_psi[psiIndex][0]=c*h_xx_r-s*h_xx_i;
      grad_grad_psi[psiIndex][1]=c*h_xy_r-s*h_xy_i;
      grad_grad_psi[psiIndex][2]=c*h_xz_r-s*h_xz_i;
      grad_grad_psi[psiIndex][3]=c*h_yx_r-s*h_yx_i;
      grad_grad_psi[psiIndex][4]=c*h_yy_r-s*h_yy_i;
      grad_grad_psi[psiIndex][5]=c*h_yz_r-s*h_yz_i;
      grad_grad_psi[psiIndex][6]=c*h_zx_r-s*h_zx_i;
      grad_grad_psi[psiIndex][7]=c*h_zy_r-s*h_zy_i;
      grad_grad_psi[psiIndex][8]=c*h_zz_r-s*h_zz_i;
    }
  }

  ///evaluate value+gradient+hessian at particle iat's active position
  template<typename VV, typename GV, typename GGV>
  void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi)
  {
    const PointType& r=P.activeR(iat);
    PointType ru(PrimLattice.toUnit_floor(r));
    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(myV.size(), getAlignment<ST>(),
                        omp_get_num_threads(),
                        omp_get_thread_num(),
                        first, last);

      spline2::evaluate3d_vgh(SplineInst->spline_m,ru,myV,myG,myH,first,last);
      assign_vgh(r,psi,dpsi,grad_grad_psi,first/2,last/2);
    }
  }
};

}
#endif
buggy_version.c
#include <stdio.h>

/* Build H[i] = i, then a lookup table LUT[i] = scale_factor * sum(H[0..i])
 * and print it.
 *
 * Bug fixed: the prefix-sum loop was annotated
 *   #pragma omp parallel for reduction(+: sum)
 * Under an OpenMP reduction each thread accumulates into a PRIVATE copy of
 * `sum`, so LUT[i] captured a thread-local partial sum -- the printed table
 * was wrong and nondeterministic.  An inclusive prefix scan carries a
 * loop dependency, so it is computed sequentially here (OpenMP 5.0's
 * `reduction(inscan,...)` + `omp scan` could parallelize it).  The
 * reduction clause on the initialization loop was also removed: that loop
 * never touches `sum` and its iterations are independent.
 *
 * Returns 0; output is one LUT entry per line. */
int main(){
    int sum = 0;                 /* running inclusive prefix sum of H */
    int DATA_MAG = 100;          /* number of table entries */
    int H[100];
    int scale_factor = 10;       /* multiplier applied to each prefix sum */

    /* independent iterations: safe to run in parallel */
    #pragma omp parallel for
    for (int i = 0; i < DATA_MAG; i++) {
        H[i] = i;
    }

    int LUT[100];
    /* sequential scan: LUT[i] = scale_factor * (0 + 1 + ... + i) */
    for (int i = 0; i < DATA_MAG; i++) {
        sum += H[i];
        LUT[i] = sum * scale_factor;
    }

    for (int i = 0; i < 100; i++) {
        printf("%d \n", LUT[i]);
    }
    return 0;
}
swapCheck2.c
/* Swap-check driver: repeatedly swaps A and B inside `single` regions and
 * accumulates D = D + A + B inside `master` regions, with barriers
 * separating every phase.  The numbered labels (l1..l16) and the exact
 * master/single/barrier choreography appear to be the artifact under
 * inspection (e.g. by an external race/verification checker -- TODO
 * confirm), so the structure is deliberately left untouched. */
int main() {
    int A = 5;
    int B = 10;
    int C;        /* scratch slot for the three-assignment swap */
    int D = 0;    /* shared accumulator */

    #pragma omp parallel
    {
        /* every thread counts itself into D exactly once */
        #pragma omp atomic
        D = D + 1;

        int x = 11;  /* private; > 10, so the while body runs exactly once */
        while (1) {
            /* phase 1: master reads A+B into D */
            #pragma omp master
            {
l4:             D = D + A + B;
            }
            #pragma omp barrier
            /* phase 2: one thread swaps A and B (nowait: others proceed
             * to the barrier below) */
            #pragma omp single nowait
            {
l1:             C = A;
l2:             A = B;
l3:             B = C;
            }
            #pragma omp barrier
            #pragma omp master
            {
l8:             D = D + A + B;
            }
            #pragma omp barrier
            #pragma omp single nowait
            {
l5:             C = A;
l6:             A = B;
l7:             B = C;
            }
            #pragma omp barrier
            #pragma omp master
            {
l12:            D = D + A + B;
            }
            #pragma omp barrier
            #pragma omp single nowait
            {
l9:             C = A;
l10:            A = B;
l11:            B = C;
            }
            #pragma omp barrier
            #pragma omp master
            {
l16:            D = D + A + B;
            }
            #pragma omp barrier
            #pragma omp single nowait
            {
l13:            C = A;
l14:            A = B;
l15:            B = C;
            }
            #pragma omp barrier
            if (x > 10) {
                break;   /* always taken: single pass through the loop */
            }
        }
    }
}
calib.c
/* Copyright 2013-2016. The Regents of the University of California.
 * Copyright 2016. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
 * 2013 Dara Bahri <dbahri123@gmail.com>
 * 2015-2016 Siddharth Iyer <sid8795@gmail.com>
 *
 *
 * Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M.
 * ESPIRiT - An Eigenvalue Approach to Autocalibrating Parallel MRI: Where SENSE
 * meets GRAPPA. Magn Reson Med, 71:990-1001 (2014)
 *
 * Iyer S, Ong F, Lustig M.
 * Towards A Parameter Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation.
 * Presented in the session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
 * http://www.ismrm.org/16/program_files/O86.htm
 *
 */

#include <assert.h>
#include <complex.h>
#include <math.h>
#include <stdbool.h>

#include "num/multind.h"
#include "num/fft.h"
#include "num/flpmath.h"
#include "num/linalg.h"
#include "num/lapack.h"
#include "num/casorati.h"
#include "num/rand.h"

#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/resize.h"
#include "misc/debug.h"
#include "misc/utils.h"

#include "calib/calmat.h"
#include "calib/cc.h"
#include "calib/softweight.h"

#include "calib.h"

#ifdef USE_CUDA
#include "calib/calibcu.h"
#endif

#if 0
#define CALMAT_SVD
#endif

#if 0
#define FLIP
#endif

#ifndef M_PI
#define M_PI	3.14159265358979323846
#endif


/* Eigendecomposition of an NxN Hermitian matrix via orthogonal iteration,
 * keeping the M leading eigenpairs.  The lower triangle of `matrix` is
 * mirrored to the upper triangle first; on return `matrix` holds the
 * eigenvectors (row i = vector i) and val[] the eigenvalues. */
static void eigen_herm3(int M, int N, float val[M], complex float matrix[N][N]) // ordering might be different to herm2
{
	complex float mout[M][N];

	/* fill upper triangle from the (stored) lower triangle */
	for (int li = 0; li < N; li++)
		for (int lj = 0; lj < li; lj++)
			matrix[lj][li] = conj(matrix[li][lj]);

	//mat_identity(M, N, mout);
	/* 30 iterations of orthogonal (subspace) iteration */
	orthiter(M, N, 30, val, mout, matrix);

	for (int i = 0; i < M; i++)
		for (int j = 0; j < N; j++)
			matrix[i][j] = mout[i][j];
}

/* Smooth step: 0 below x=-1, 1 above x=1, rational sigmoid in between. */
static float scurve(float x)
{
	if (x <= -1.)
		return 0.;

	if (x >= 1.)
		return 1.;

	return 0.5 * (1. + 2. * x / (1. + powf(x, 2.)));
}

/* Soft crop weight: smoothly ramps from 0 to 1 as sqrt(val) passes crth. */
static float crop_weight_function(float crth, float val)
{
	return scurve((sqrtf(val) - crth) / (1. - crth));
}

/* Hard crop weight: binary threshold at crth. */
static float crop_thresh_function(float crth, float val)
{
	return (val <= crth) ? 0. : 1.;
}

typedef float (*weight_function)(float crth, float val);

/* Scale each coil entry of ptr by fun(crth, |map|), where `map` is the
 * per-voxel eigenvalue map.  dims = {x, y, z, coils, maps}; any trailing
 * dimensions must be singletons. */
static void crop_weight(const long dims[DIMS], complex float* ptr, weight_function fun, float crth, const complex float* map)
{
	long xx = dims[0];
	long yy = dims[1];
	long zz = dims[2];
	long cc = dims[3];
	long mm = dims[4];

	assert(DIMS > 5);
	assert(1 == md_calc_size(DIMS - 5, dims + 5));

	for (long m = 0; m < mm; m++) {
#pragma omp parallel for
		for (long k = 0; k < zz; k++) {
			for (long i = 0; i < yy; i++) {
				for (long j = 0; j < xx; j++) {

					/* one weight per voxel/map ... */
					float val = cabsf(map[((m * zz + k) * yy + i) * xx + j]);

					/* ... applied across all coils */
					for (long c = 0; c < cc; c++)
						ptr[(((m * cc + c) * zz + k) * yy + i) * xx + j] *= fun(crth, val);
				}
			}
		}
	}
}

/* Public entry: crop sensitivities with either the soft (s-curve) or the
 * hard (threshold) weight, selected by `soft`. */
void crop_sens(const long dims[DIMS], complex float* ptr, bool soft, float crth, const complex float* map)
{
	crop_weight(dims, ptr, soft ? crop_weight_function : crop_thresh_function, crth, map);
}


/**
 * sure_crop - This determines the crop-threshold to use as described in the talk: "Towards A Parameter
 *	       Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation". This was given at the
 *	       session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
 *
 * Parameters:
 *	var	     - Estimated variance in data.
 *	evec_dims    - The eigenvector dimensions.
 *	evec_data    - The eigenvectors.
 *	eptr	     - The eigenvalues.
 *	calreg_dims  - Dimension of the calibration region.
 *	calreg	     - Calibration data.
 */
static float sure_crop(float var, const long evec_dims[5], complex float* evec_data, complex float* eptr, const long calreg_dims[5], const complex float* calreg)
{
	long num_maps = evec_dims[4];

	// Construct low-resolution image
	long im_dims[5];
	md_select_dims(5, 15, im_dims, evec_dims);	/* flags 15 = dims 0..3 */

	complex float* im = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);
	md_clear(5, im_dims, im, CFL_SIZE);

	/* zero-pad the calibration region to image size, then inverse FFT */
	md_resize_center(5, im_dims, im, calreg_dims, calreg, CFL_SIZE);
	ifftuc(5, im_dims, FFT_FLAGS, im, im);

	// Temporary vector for crop dimensions
	long cropdims[5];
	md_select_dims(5, 15, cropdims, calreg_dims);
	cropdims[4] = num_maps;

	// Eigenvectors (M)
	complex float* M = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);
	md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

	// Temporary eigenvector holder to hold low resolution maps
	complex float* LM = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);

	// Temporary holder for projection calreg
	complex float* TC = md_alloc_sameplace(5, calreg_dims, CFL_SIZE, calreg);

	// Temporary holder to hold low resolution calib maps
	complex float* CM = md_alloc_sameplace(5, cropdims, CFL_SIZE, calreg);

	// Eigenvalues (W)
	long W_dims[5];
	md_select_dims(5, 23, W_dims, evec_dims);	/* flags 23 = dims 0,1,2,4 */

	complex float* W = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);
	md_copy(5, W_dims, W, eptr, CFL_SIZE);

	// Place holder for the inner product result
	complex float* ip = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);

	// Place holder for the projection result
	complex float* proj = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);

	// Place holder for divergence term
	long div_dims[5] = MD_INIT_ARRAY(5, 1);
	complex float* div = md_alloc_sameplace(5, div_dims, CFL_SIZE, calreg);

	// Calculating strides.
	long str1_ip[5];
	long str2_ip[5];
	long stro_ip[5];

	md_calc_strides(5, str1_ip, im_dims, CFL_SIZE);
	md_calc_strides(5, str2_ip, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_ip, W_dims, CFL_SIZE);

	long str1_proj[5];
	long str2_proj[5];
	long stro_proj[5];

	md_calc_strides(5, str1_proj, W_dims, CFL_SIZE);
	md_calc_strides(5, str2_proj, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_proj, im_dims, CFL_SIZE);

	long str1_div[5];
	long str2_div[5];
	long stro_div[5];

	md_calc_strides(5, str1_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, str2_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_div, div_dims, CFL_SIZE);

	/* broadcast-compatible dims for the fmac operations below */
	long tdims_ip[5];
	long tdims_proj[5];

	for (unsigned int i = 0; i < 5; i++) {

		assert((im_dims[i] == evec_dims[i]) || (1 == im_dims[i]) || (1 == evec_dims[i]));
		assert((W_dims[i] == evec_dims[i]) || (1 == W_dims[i]) || (1 == evec_dims[i]));

		tdims_ip[i] = (1 == im_dims[i]) ? evec_dims[i] : im_dims[i];
		tdims_proj[i] = (1 == W_dims[i]) ? evec_dims[i] : W_dims[i];
	}

	// Starting parameter sweep with SURE.
	/* Bisection-style sweep: step s halves and flips sign each outer pass;
	 * the inner loop walks c while the SURE-estimated MSE keeps dropping. */
	float mse = -1.;
	float old_mse = 0.;

	float s = -0.1;
	float c = 0.99;

	long ctr1 = 0;
	long ctr2 = 0;

	debug_printf(DP_INFO, "---------------------------------------------\n");
	debug_printf(DP_INFO, "| CTR1 | CTR2 |  Crop  |       Est. MSE     |\n");
	debug_printf(DP_INFO, "---------------------------------------------\n");

	while (fabs(s) > 1.E-4) {

		ctr1++;

		while (	   (c < 0.999)
			&& (c > 0.001)
			&& (   (ctr2 <= 1)
			    || (mse < old_mse))) {

			ctr2++;

			/* reset all scratch for this candidate threshold c */
			md_clear(5, W_dims, ip, CFL_SIZE);
			md_clear(5, im_dims, proj, CFL_SIZE);
			md_clear(5, div_dims, div, CFL_SIZE);
			md_clear(5, evec_dims, M, CFL_SIZE);
			md_clear(5, evec_dims, LM, CFL_SIZE);
			md_clear(5, calreg_dims, TC, CFL_SIZE);

			md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

			old_mse = mse;
			mse = 0.;

			/* hard-crop the maps at threshold c */
			crop_weight(evec_dims, M, crop_thresh_function, c, W);

			/* ip = <im, M>: coil images projected onto the maps */
			md_zfmacc2(5, tdims_ip, stro_ip, ip, str1_ip, im, str2_ip, M);

			// Projection.
			md_zfmac2(5, tdims_proj, stro_proj, proj, str1_proj, ip, str2_proj, M);

			fftuc(5, im_dims, FFT_FLAGS, proj, proj);

			// Low res proj img.
			/* crop to the calibration region and back: band-limit */
			md_resize_center(5, calreg_dims, TC, im_dims, proj, CFL_SIZE);
			md_resize_center(5, im_dims, proj, calreg_dims, TC, CFL_SIZE);

			ifftuc(5, im_dims, FFT_FLAGS, proj, proj);

			/* data-fidelity part of the SURE estimate */
			for (long jdx = 0; jdx < md_calc_size(5, im_dims); jdx++)
				mse += powf(cabsf(im[jdx] - proj[jdx]), 2.);

			fftuc(5, evec_dims, FFT_FLAGS, LM, M);

			// low-res maps .
			md_resize_center(5, cropdims, CM, evec_dims, LM, CFL_SIZE);
			md_resize_center(5, evec_dims, LM, cropdims, CM, CFL_SIZE);

			ifftuc(5, evec_dims, FFT_FLAGS, LM, LM);

			md_zfmacc2(5, evec_dims, stro_div, div, str1_div, LM, str2_div, LM);

			// Calc SURE div using low res maps.
			mse += 2. * var * crealf(*div);

			if (ctr2 == 1)
				debug_printf(DP_INFO, "| %4ld | %4ld | %0.4f | %0.12e |\n", ctr1, ctr2, c, mse);
			else
				debug_printf(DP_INFO, "|      | %4ld | %0.4f | %0.12e |\n", ctr2, c, mse);

			c = c + s;
		}

		/* step back past the minimum, halve and reverse the step */
		c -= s;
		ctr2 = 0;
		s = -s / 2;
		c += s;
	}

	c = c + s;

	debug_printf(DP_INFO, "---------------------------------------------\n");

	md_free(im);
	md_free(TC);
	md_free(CM);
	md_free(M);
	md_free(LM);
	md_free(W);
	md_free(ip);
	md_free(proj);
	md_free(div);

	debug_printf(DP_DEBUG1, "Calculated c: %.4f\n", c);

	return c;
}


/* Compute the image-domain covariance (imgcov) from null-space kernels
 * extracted from the calibration data; svals receives singular values. */
void calone(const struct ecalib_conf* conf, const long cov_dims[4], complex float* imgcov, unsigned int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data)
{
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

#if 1
	long nskerns_dims[5];
	complex float* nskerns;
	compute_kernels(conf, nskerns_dims, &nskerns, SN, svals, calreg_dims, data);
#else
	/* dead alternative path kept for reference: SPIRiT kernels */
	long channels = calreg_dims[3];

	long kx = conf->kdims[0];
	long ky = conf->kdims[1];
	long kz = conf->kdims[2];

	long nskerns_dims[5] = { kx, ky, kz, channels, 0 };
	long N = md_calc_size(4, nskerns_dims);

	assert(N > 0);
	nskerns_dims[4] = N;

	complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE);

	long nr_kernels = channels;
	nskerns_dims[4] = channels;
	spirit_kernel(nskerns_dims, nskerns, calreg_dims, data);
#endif

	compute_imgcov(cov_dims, imgcov, nskerns_dims, nskerns);

	md_free(nskerns);
}


/* calculate point-wise maps
 *
 * Per-voxel eigendecomposition of the packed (lower-triangular) image-domain
 * covariance imgcov2, producing sensitivity maps (optr) and eigenvalue maps
 * (eptr).  NOTE: this function continues beyond the end of this chunk; the
 * text below is reproduced verbatim up to the cut.
 */
void eigenmaps(const long out_dims[DIMS], complex float* optr, complex float* eptr, const complex float* imgcov2, const long msk_dims[3], const bool* msk, bool orthiter, bool ecal_usegpu)
{
#ifdef USE_CUDA
	if (ecal_usegpu) {

		//FIXME cuda version should be able to return sensitivities for a subset of image-space points
		assert(!msk);
		eigenmapscu(out_dims, optr, eptr, imgcov2);
		return;
	}
#else
	assert(!ecal_usegpu);
#endif

	long channels = out_dims[3];
	long maps = out_dims[4];

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(maps <= channels);

	long xx = out_dims[0];
	long yy = out_dims[1];
	long zz = out_dims[2];

	float scale = 1.; // for some reason, not

	if (msk_dims) {

		assert(msk_dims[0] == xx);
		assert(msk_dims[1] == yy);
		assert(msk_dims[2] == zz);
	}

	md_clear(5, out_dims, optr, CFL_SIZE);

#pragma omp parallel for collapse(3)
	for (long k = 0; k < zz; k++) {
		for (long j = 0; j < yy; j++) {
			for (long i = 0; i < xx; i++) {

				if (!msk || msk[i + xx * (j + yy * k)]) {

					float val[channels];
					complex float cov[channels][channels];

					complex float tmp[channels * (channels + 1) / 2];

					/* unpack the per-voxel lower-triangular covariance */
					for (long l = 0; l < channels * (channels + 1) / 2; l++)
						tmp[l] = imgcov2[((l * zz + k) * yy + j) * xx + i] / scale;

					unpack_tri_matrix(channels, cov, tmp);

					if (orthiter)
						eigen_herm3(maps, channels, val, cov);
					else
						lapack_eig(channels, val, cov);

					for (long u = 0; u < maps; u++) {

						long ru = (orthiter ?
maps : channels) - 1 - u; for (long v = 0; v < channels; v++) optr[((((u * channels + v) * zz + k) * yy + j) * xx + i)] = cov[ru][v]; if (NULL != eptr) eptr[((u * zz + k) * yy + j) * xx + i] = val[ru]; } } } } } } void caltwo(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* emaps, const long in_dims[4], complex float* in_data, const long msk_dims[3], const bool* msk) { long xx = out_dims[0]; long yy = out_dims[1]; long zz = out_dims[2]; long xh = in_dims[0]; long yh = in_dims[1]; long zh = in_dims[2]; long channels = out_dims[3]; long cosize = channels * (channels + 1) / 2; assert(DIMS >= 5); assert(1 == md_calc_size(DIMS - 5, out_dims + 5)); assert(in_dims[3] == cosize); long cov_dims[4] = { xh, yh, zh, cosize }; long covbig_dims[4] = { xx, yy, zz, cosize }; assert(((xx == 1) && (xh == 1)) || (xx >= xh)); assert(((yy == 1) && (yh == 1)) || (yy >= yh)); assert(((zz == 1) && (zh == 1)) || (zz >= zh)); assert((1 == xh) || (0 == xh % 2)); assert((1 == yh) || (0 == yh % 2)); assert((1 == zh) || (0 == zh % 2)); complex float* imgcov2 = md_alloc(4, covbig_dims, CFL_SIZE); debug_printf(DP_DEBUG1, "Resize...\n"); sinc_zeropad(4, covbig_dims, imgcov2, cov_dims, in_data); debug_printf(DP_DEBUG1, "Point-wise eigen-decomposition...\n"); eigenmaps(out_dims, out_data, emaps, imgcov2, msk_dims, msk, conf->orthiter, conf->usegpu); md_free(imgcov2); } void calone_dims(const struct ecalib_conf* conf, long cov_dims[4], long channels) { long kx = conf->kdims[0]; long ky = conf->kdims[1]; long kz = conf->kdims[2]; cov_dims[0] = (1 == kx) ? 1 : (2 * kx); cov_dims[1] = (1 == ky) ? 1 : (2 * ky); cov_dims[2] = (1 == kz) ? 
1 : (2 * kz); cov_dims[3] = channels * (channels + 1) / 2; } const struct ecalib_conf ecalib_defaults = { { 6, 6, 6 }, 0.001, -1, -1., false, false, 0.8, true, false, -1., false, true, -1., false}; void calib2(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, unsigned int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data, const long msk_dims[3], const bool* msk) { long channels = calreg_dims[3]; long maps = out_dims[4]; assert(calreg_dims[3] == out_dims[3]); assert(maps <= channels); assert(1 == md_calc_size(DIMS - 5, out_dims + 5)); assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5)); complex float rot[channels][channels]; if (conf->rotphase) { // rotate the the phase with respect to the first principle component long scc_dims[DIMS] = MD_INIT_ARRAY(DIMS, 1); scc_dims[COIL_DIM] = channels; scc_dims[MAPS_DIM] = channels; scc(scc_dims, &rot[0][0], calreg_dims, data); } else { for (unsigned int i = 0; i < channels; i++) for (unsigned int j = 0; j < channels; j++) rot[i][j] = (i == j) ? 1. : 0.; } long cov_dims[4]; calone_dims(conf, cov_dims, channels); complex float* imgcov = md_alloc(4, cov_dims, CFL_SIZE); calone(conf, cov_dims, imgcov, SN, svals, calreg_dims, data); caltwo(conf, out_dims, out_data, eptr, cov_dims, imgcov, msk_dims, msk); /* Intensity and phase normalization similar as proposed * for adaptive combine (Walsh's method) in * Griswold et al., ISMRM 10:2410 (2002) */ if (conf->intensity) { debug_printf(DP_DEBUG1, "Normalize...\n"); /* I think the reason this works is because inhomogeneity usually * comes from only a few coil elements which are close. The l1-norm * is more resilient against such outliers. -- Martin */ normalizel1(DIMS, COIL_FLAG, out_dims, out_data); md_zsmul(DIMS, out_dims, out_data, out_data, sqrtf((float)channels)); } float c = (conf->crop >= 0.) ? 
conf->crop : sure_crop(conf->var, out_dims, out_data, eptr, calreg_dims, data); debug_printf(DP_DEBUG1, "Crop maps... (c = %.2f)\n", c); crop_sens(out_dims, out_data, conf->softcrop, c, eptr); debug_printf(DP_DEBUG1, "Fix phase...\n"); fixphase2(DIMS, out_dims, COIL_DIM, rot[0], out_data, out_data); md_free(imgcov); } void calib(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, unsigned int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data) { calib2(conf, out_dims, out_data, eptr, SN, svals, calreg_dims, data, NULL, NULL); } static void perturb(const long dims[2], complex float* vecs, float amt) { complex float* noise = md_alloc(2, dims, CFL_SIZE); md_gaussian_rand(2, dims, noise); for (long j = 0; j < dims[1]; j++) { float nrm = md_znorm(1, dims, noise + j * dims[0]); complex float val = amt / nrm; md_zsmul(1, dims, noise + j * dims[0], noise + j * dims[0], val); } md_zadd(2, dims, vecs, vecs, noise); for (long j = 0; j < dims[1]; j++) { float nrm = md_znorm(1, dims, vecs + j * dims[0]); complex float val = 1 / nrm; md_zsmul(1, dims, vecs + j * dims[0], vecs + j * dims[0], val); } md_free(noise); } static int number_of_kernels(const struct ecalib_conf* conf, unsigned int N, const float val[N]) { unsigned int n = 0; if (-1 != conf->numsv) { n = conf->numsv; assert(-1. == conf->percentsv); assert(-1. == conf->threshold); } else if (conf->percentsv != -1.) { n = (unsigned int)(N * conf->percentsv / 100.); assert(-1 == conf->numsv); assert(-1. == conf->threshold); } else { assert(-1 == conf->numsv); assert(-1. == conf->percentsv); for (unsigned int i = 0; i < N; i++) { if (val[i] / val[0] > sqrtf(conf->threshold)) n++; } } if (val[0] <= 0.) error("No signal.\n"); debug_printf(DP_DEBUG1, "Using %d/%ld kernels (%.2f%%, last SV: %f%s).\n", n, N, (float)n / (float)N * 100., (n > 0) ? (val[n - 1] / val[0]) : 1., conf->weighting ? 
", weighted" : ""); float tr = 0.; for (unsigned int i = 0; i < N; i++) { tr += powf(val[i], 2.); debug_printf(DP_DEBUG3, "SVALS %f (%f)\n", val[i], val[i] / val[0]); } debug_printf(DP_DEBUG3, "\nTRACE: %f (%f)\n", tr, tr / (float)N); assert(n <= N); return n; } void compute_kernels(const struct ecalib_conf* conf, long nskerns_dims[5], complex float** nskerns_ptr, unsigned int SN, float val[SN], const long caldims[DIMS], const complex float* caldata) { assert(1 == md_calc_size(DIMS - 5, caldims + 5)); nskerns_dims[0] = conf->kdims[0]; nskerns_dims[1] = conf->kdims[1]; nskerns_dims[2] = conf->kdims[2]; nskerns_dims[3] = caldims[3]; long N = md_calc_size(4, nskerns_dims); assert(N > 0); nskerns_dims[4] = N; complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE); *nskerns_ptr = nskerns; PTR_ALLOC(complex float[N][N], vec); assert(NULL != val); assert(SN == N); debug_printf(DP_DEBUG1, "Build calibration matrix and SVD...\n"); #ifdef CALMAT_SVD calmat_svd(conf->kdims, N, *vec, val, caldims, caldata); if (conf->weighting) soft_weight_singular_vectors(N, conf->var, conf->kdims, caldims, val, val); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) #ifndef FLIP nskerns[i * N + j] = ((*vec)[j][i]) * (conf->weighting ? val[i] : 1.); #else nskerns[i * N + j] = ((*vec)[j][N - 1 - i]) * (conf->weighting ? val[N - 1 - i] : 1.); #endif #else covariance_function(conf->kdims, N, *vec, caldims, caldata); debug_printf(DP_DEBUG1, "Eigen decomposition... (size: %ld)\n", N); // we could apply Nystroem method here to speed it up float tmp_val[N]; lapack_eig(N, tmp_val, *vec); // reverse and square root, test for smaller null to avoid NaNs for (int i = 0; i < N; i++) val[i] = (tmp_val[N - 1 - i] < 0.) ? 0. : sqrtf(tmp_val[N - 1 - i]); if (conf->weighting) soft_weight_singular_vectors(N, conf-> var, conf->kdims, caldims, val, val); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) #ifndef FLIP nskerns[i * N + j] = (*vec)[N - 1 - i][j] * (conf->weighting ? 
val[i] : 1.); // flip #else nskerns[i * N + j] = (*vec)[i][j] * (conf->weighting ? val[N - 1 - i] : 1.); // flip #endif #endif if (conf->perturb > 0.) { long dims[2] = { N, N }; perturb(dims, nskerns, conf->perturb); } #ifndef FLIP nskerns_dims[4] = number_of_kernels(conf, N, val); #else nskerns_dims[4] = N - number_of_kernels(conf, N, val); #endif PTR_FREE(vec); } void compute_imgcov(const long cov_dims[4], complex float* imgcov, const long nskerns_dims[5], const complex float* nskerns) { debug_printf(DP_DEBUG1, "Zeropad...\n"); long xh = cov_dims[0]; long yh = cov_dims[1]; long zh = cov_dims[2]; long kx = nskerns_dims[0]; long ky = nskerns_dims[1]; long kz = nskerns_dims[2]; long channels = nskerns_dims[3]; long nr_kernels = nskerns_dims[4]; long imgkern_dims[5] = { xh, yh, zh, channels, nr_kernels }; complex float* imgkern1 = md_alloc(5, imgkern_dims, CFL_SIZE); complex float* imgkern2 = md_alloc(5, imgkern_dims, CFL_SIZE); md_resize_center(5, imgkern_dims, imgkern1, nskerns_dims, nskerns, CFL_SIZE); // resort array debug_printf(DP_DEBUG1, "FFT (juggling)...\n"); long istr[5]; long mstr[5]; long idim[5] = { xh, yh, zh, channels, nr_kernels }; long mdim[5] = { nr_kernels, channels, xh, yh, zh }; md_calc_strides(5, istr, idim, CFL_SIZE); md_calc_strides(5, mstr, mdim, CFL_SIZE); long m2str[5] = { mstr[2], mstr[3], mstr[4], mstr[1], mstr[0] }; ifftmod(5, imgkern_dims, FFT_FLAGS, imgkern1, imgkern1); ifft2(5, imgkern_dims, FFT_FLAGS, m2str, imgkern2, istr, imgkern1); float scalesq = (kx * ky * kz) * (xh * yh * zh); // second part for FFT scaling md_free(imgkern1); debug_printf(DP_DEBUG1, "Calculate Gram matrix...\n"); int cosize = channels * (channels + 1) / 2; assert(cov_dims[3] == cosize); #pragma omp parallel for collapse(3) for (int k = 0; k < zh; k++) { for (int j = 0; j < yh; j++) { for (int i = 0; i < xh; i++) { complex float gram[cosize]; gram_matrix2(channels, gram, nr_kernels, (const complex float (*)[nr_kernels])(imgkern2 + ((k * yh + j) * xh + i) * 
(channels * nr_kernels))); #ifdef FLIP // add (scaled) identity matrix for (int i = 0, l = 0; i < channels; i++) for (int j = 0; j <= i; j++, l++) gram[l] = ((i == j) ? (kx * ky * kz) : 0.) - gram[l]; #endif for (int l = 0; l < cosize; l++) imgcov[(((l * zh) + k) * yh + j) * xh + i] = gram[l] / scalesq; } } } md_free(imgkern2); }
data.h
/*! * Copyright (c) 2015 by Contributors * \file data.h * \brief The input data structure of xgboost. * \author Tianqi Chen */ #ifndef XGBOOST_DATA_H_ #define XGBOOST_DATA_H_ #include <dmlc/base.h> #include <dmlc/data.h> #include <rabit/rabit.h> #include <cstring> #include <memory> #include <numeric> #include <algorithm> #include <string> #include <vector> #include "./base.h" #include "../../src/common/span.h" #include "../../src/common/group_data.h" #include "../../src/common/host_device_vector.h" namespace xgboost { // forward declare learner. class LearnerImpl; /*! \brief data type accepted by xgboost interface */ enum DataType { kFloat32 = 1, kDouble = 2, kUInt32 = 3, kUInt64 = 4 }; /*! * \brief Meta information about dataset, always sit in memory. */ class MetaInfo { public: /*! \brief number of rows in the data */ uint64_t num_row_{0}; /*! \brief number of columns in the data */ uint64_t num_col_{0}; /*! \brief number of nonzero entries in the data */ uint64_t num_nonzero_{0}; /*! \brief label of each instance */ HostDeviceVector<bst_float> labels_; /*! * \brief specified root index of each instance, * can be used for multi task setting */ std::vector<bst_uint> root_index_; /*! * \brief the index of begin and end of a group * needed when the learning task is ranking. */ std::vector<bst_uint> group_ptr_; /*! \brief weights of each instance, optional */ HostDeviceVector<bst_float> weights_; /*! \brief session-id of each instance, optional */ std::vector<uint64_t> qids_; /*! * \brief initialized margins, * if specified, xgboost will start from this init margin * can be used to specify initial prediction to boost from. */ HostDeviceVector<bst_float> base_margin_; /*! \brief version flag, used to check version of this info */ static const int kVersion = 2; /*! \brief version that introduced qid field */ static const int kVersionQidAdded = 2; /*! \brief default constructor */ MetaInfo() = default; /*! * \brief Get weight of each instances. 
* \param i Instance index. * \return The weight. */ inline bst_float GetWeight(size_t i) const { return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f; } /*! * \brief Get the root index of i-th instance. * \param i Instance index. * \return The pre-defined root index of i-th instance. */ inline unsigned GetRoot(size_t i) const { return root_index_.size() != 0 ? root_index_[i] : 0U; } /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */ inline const std::vector<size_t>& LabelAbsSort() const { if (label_order_cache_.size() == labels_.Size()) { return label_order_cache_; } label_order_cache_.resize(labels_.Size()); std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0); const auto& l = labels_.HostVector(); XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(), [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);}); return label_order_cache_; } /*! \brief clear all the information */ void Clear(); /*! * \brief Load the Meta info from binary stream. * \param fi The input stream */ void LoadBinary(dmlc::Stream* fi); /*! * \brief Save the Meta info to binary stream * \param fo The output stream. */ void SaveBinary(dmlc::Stream* fo) const; /*! * \brief Set information in the meta info. * \param key The key of the information. * \param dptr The data pointer of the source array. * \param dtype The type of the source data. * \param num Number of elements in the source array. */ void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num); private: /*! \brief argsort of labels */ mutable std::vector<size_t> label_order_cache_; }; /*! \brief Element from a sparse vector */ struct Entry { /*! \brief feature index */ bst_uint index; /*! \brief feature value */ bst_float fvalue; /*! \brief default constructor */ Entry() = default; /*! * \brief constructor with index and value * \param index The feature or row index. * \param fvalue The feature value. 
*/ Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {} /*! \brief reversely compare feature values */ inline static bool CmpValue(const Entry& a, const Entry& b) { return a.fvalue < b.fvalue; } inline bool operator==(const Entry& other) const { return (this->index == other.index && this->fvalue == other.fvalue); } }; /*! * \brief In-memory storage unit of sparse batch, stored in CSR format. */ class SparsePage { public: // Offset for each row. HostDeviceVector<size_t> offset; /*! \brief the data of the segments */ HostDeviceVector<Entry> data; size_t base_rowid; /*! \brief an instance of sparse vector in the batch */ using Inst = common::Span<Entry const>; /*! \brief get i-th row from the batch */ inline Inst operator[](size_t i) const { const auto& data_vec = data.HostVector(); const auto& offset_vec = offset.HostVector(); size_t size; // in distributed mode, some partitions may not get any instance for a feature. Therefore // we should set the size as zero if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) { size = 0; } else { size = offset_vec[i + 1] - offset_vec[i]; } return {data_vec.data() + offset_vec[i], static_cast<Inst::index_type>(size)}; } /*! \brief constructor */ SparsePage() { this->Clear(); } /*! \return number of instance in the page */ inline size_t Size() const { return offset.Size() - 1; } /*! \return estimation of memory cost of this page */ inline size_t MemCostBytes() const { return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry); } /*! 
\brief clear the page */ inline void Clear() { base_rowid = 0; auto& offset_vec = offset.HostVector(); offset_vec.clear(); offset_vec.push_back(0); data.HostVector().clear(); } SparsePage GetTranspose(int num_columns) const { SparsePage transpose; common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(), &transpose.data.HostVector()); const int nthread = omp_get_max_threads(); builder.InitBudget(num_columns, nthread); long batch_size = static_cast<long>(this->Size()); // NOLINT(*) #pragma omp parallel for schedule(static) for (long i = 0; i < batch_size; ++i) { // NOLINT(*) int tid = omp_get_thread_num(); auto inst = (*this)[i]; for (bst_uint j = 0; j < inst.size(); ++j) { builder.AddBudget(inst[j].index, tid); } } builder.InitStorage(); #pragma omp parallel for schedule(static) for (long i = 0; i < batch_size; ++i) { // NOLINT(*) int tid = omp_get_thread_num(); auto inst = (*this)[i]; for (bst_uint j = 0; j < inst.size(); ++j) { builder.Push( inst[j].index, Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue), tid); } } return transpose; } void SortRows() { auto ncol = static_cast<bst_omp_uint>(this->Size()); #pragma omp parallel for schedule(dynamic, 1) for (bst_omp_uint i = 0; i < ncol; ++i) { if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) { std::sort( this->data.HostVector().begin() + this->offset.HostVector()[i], this->data.HostVector().begin() + this->offset.HostVector()[i + 1], Entry::CmpValue); } } } /*! * \brief Push row block into the page. * \param batch the row batch. */ void Push(const dmlc::RowBlock<uint32_t>& batch); /*! * \brief Push a sparse page * \param batch the row page */ void Push(const SparsePage &batch); /*! * \brief Push a SparsePage stored in CSC format * \param batch The row batch to be pushed */ void PushCSC(const SparsePage& batch); /*! 
* \brief Push one instance into page * \param inst an instance row */ void Push(const Inst &inst); size_t Size() { return offset.Size() - 1; } }; class BatchIteratorImpl { public: virtual ~BatchIteratorImpl() {} virtual BatchIteratorImpl* Clone() = 0; virtual SparsePage& operator*() = 0; virtual const SparsePage& operator*() const = 0; virtual void operator++() = 0; virtual bool AtEnd() const = 0; }; class BatchIterator { public: using iterator_category = std::forward_iterator_tag; explicit BatchIterator(BatchIteratorImpl* impl) { impl_.reset(impl); } BatchIterator(const BatchIterator& other) { if (other.impl_) { impl_.reset(other.impl_->Clone()); } else { impl_.reset(); } } void operator++() { CHECK(impl_ != nullptr); ++(*impl_); } SparsePage& operator*() { CHECK(impl_ != nullptr); return *(*impl_); } const SparsePage& operator*() const { CHECK(impl_ != nullptr); return *(*impl_); } bool operator!=(const BatchIterator& rhs) const { CHECK(impl_ != nullptr); return !impl_->AtEnd(); } bool AtEnd() const { CHECK(impl_ != nullptr); return impl_->AtEnd(); } private: std::unique_ptr<BatchIteratorImpl> impl_; }; class BatchSet { public: explicit BatchSet(BatchIterator begin_iter) : begin_iter_(begin_iter) {} BatchIterator begin() { return begin_iter_; } BatchIterator end() { return BatchIterator(nullptr); } private: BatchIterator begin_iter_; }; /*! * \brief This is data structure that user can pass to DMatrix::Create * to create a DMatrix for training, user can create this data structure * for customized Data Loading on single machine. * * On distributed setting, usually an customized dmlc::Parser is needed instead. */ class DataSource : public dmlc::DataIter<SparsePage> { public: /*! * \brief Meta information about the dataset * The subclass need to be able to load this correctly from data. */ MetaInfo info; }; /*! * \brief A vector-like structure to represent set of rows. 
* But saves the memory when all rows are in the set (common case in xgb) */ class RowSet { public: /*! \return i-th row index */ inline bst_uint operator[](size_t i) const; /*! \return the size of the set. */ inline size_t Size() const; /*! \brief push the index back to the set */ inline void PushBack(bst_uint i); /*! \brief clear the set */ inline void Clear(); /*! * \brief save rowset to file. * \param fo The file to be saved. */ inline void Save(dmlc::Stream* fo) const; /*! * \brief Load rowset from file. * \param fi The file to be loaded. * \return if read is successful. */ inline bool Load(dmlc::Stream* fi); /*! \brief constructor */ RowSet() = default; private: /*! \brief The internal data structure of size */ uint64_t size_{0}; /*! \brief The internal data structure of row set if not all*/ std::vector<bst_uint> rows_; }; /*! * \brief Internal data structured used by XGBoost during training. * There are two ways to create a customized DMatrix that reads in user defined-format. * * - Provide a dmlc::Parser and pass into the DMatrix::Create * - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by DMLC_REGISTER_DATA_PARSER; * - This works best for user defined data input source, such as data-base, filesystem. * - Provide a DataSource, that can be passed to DMatrix::Create * This can be used to re-use inmemory data structure into DMatrix. */ class DMatrix { public: /*! \brief default constructor */ DMatrix() = default; /*! \brief meta information of the dataset */ virtual MetaInfo& Info() = 0; /*! \brief meta information of the dataset */ virtual const MetaInfo& Info() const = 0; /** * \brief Gets row batches. Use range based for loop over BatchSet to access individual batches. */ virtual BatchSet GetRowBatches() = 0; virtual BatchSet GetSortedColumnBatches() = 0; virtual BatchSet GetColumnBatches() = 0; // the following are column meta data, should be able to answer them fast. /*! 
\return Whether the data columns single column block. */ virtual bool SingleColBlock() const = 0; /*! \brief get column density */ virtual float GetColDensity(size_t cidx) = 0; /*! \brief virtual destructor */ virtual ~DMatrix() = default; /*! * \brief Save DMatrix to local file. * The saved file only works for non-sharded dataset(single machine training). * This API is deprecated and dis-encouraged to use. * \param fname The file name to be saved. * \return The created DMatrix. */ virtual void SaveToLocalFile(const std::string& fname); /*! * \brief Load DMatrix from URI. * \param uri The URI of input. * \param silent Whether print information during loading. * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode. * \param file_format The format type of the file, used for dmlc::Parser::Create. * By default "auto" will be able to load in both local binary file. * \param page_size Page size for external memory. * \return The created DMatrix. */ static DMatrix* Load(const std::string& uri, bool silent, bool load_row_split, const std::string& file_format = "auto", const size_t page_size = kPageSize); /*! * \brief create a new DMatrix, by wrapping a row_iterator, and meta info. * \param source The source iterator of the data, the create function takes ownership of the source. * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode. * This can be nullptr for common cases, and in-memory mode will be used. * \return a Created DMatrix. */ static DMatrix* Create(std::unique_ptr<DataSource>&& source, const std::string& cache_prefix = ""); /*! * \brief Create a DMatrix by loading data from parser. * Parser can later be deleted after the DMatrix i created. * \param parser The input data parser * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode. 
* This can be nullptr for common cases, and in-memory mode will be used. * \param page_size Page size for external memory. * \sa dmlc::Parser * \note dmlc-core provides efficient distributed data parser for libsvm format. * User can create and register customized parser to load their own format using DMLC_REGISTER_DATA_PARSER. * See "dmlc-core/include/dmlc/data.h" for detail. * \return A created DMatrix. */ static DMatrix* Create(dmlc::Parser<uint32_t>* parser, const std::string& cache_prefix = "", const size_t page_size = kPageSize); /*! \brief page size 32 MB */ static const size_t kPageSize = 32UL << 20UL; }; // implementation of inline functions inline bst_uint RowSet::operator[](size_t i) const { return rows_.size() == 0 ? static_cast<bst_uint>(i) : rows_[i]; } inline size_t RowSet::Size() const { return size_; } inline void RowSet::Clear() { rows_.clear(); size_ = 0; } inline void RowSet::PushBack(bst_uint i) { if (rows_.size() == 0) { if (i == size_) { ++size_; return; } else { rows_.resize(size_); for (size_t i = 0; i < size_; ++i) { rows_[i] = static_cast<bst_uint>(i); } } } rows_.push_back(i); ++size_; } inline void RowSet::Save(dmlc::Stream* fo) const { fo->Write(rows_); fo->Write(&size_, sizeof(size_)); } inline bool RowSet::Load(dmlc::Stream* fi) { if (!fi->Read(&rows_)) return false; if (rows_.size() != 0) return true; return fi->Read(&size_, sizeof(size_)) == sizeof(size_); } } // namespace xgboost namespace dmlc { DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true); DMLC_DECLARE_TRAITS(has_saveload, xgboost::RowSet, true); } #endif // XGBOOST_DATA_H_
jtnormal_intel.c
#include <complex.h>
#include <fftw/fftw3.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <assert.h>
#include <string.h>
#include <omp.h>
#include <xmmintrin.h>
#include <mkl.h>

/* Cache-block edge used by the blocked transpose routines below. */
#define BLK 4

/* Transpose one BLK x BLK tile: B = A^T (both tiles stored row-major,
 * contiguous, BLK*BLK elements). */
inline void TransposeBLKxBLK(complex float * __restrict__ A,
                             complex float * __restrict__ B)
{
  int i, j;
  for (i = 0; i < BLK; i++)
    for (j = 0; j < BLK; j++)
      B[i*BLK + j] = A[j*BLK + i];
}

/* Transpose a panel of `_p` columns of a dim0 x dim1 matrix from `cor_out`
 * (leading dimension dim0) into `cor_out2` (leading dimension dim1).
 * Works in BLK x BLK tiles through a pair of stack buffers; `tid` rotates
 * the starting tile per thread so concurrent callers touch different cache
 * lines first (presumably to reduce write contention — TODO confirm).
 * The two trailing loops handle the ragged remainder columns/rows that do
 * not fill a whole BLK tile. */
inline void TransposePanel(complex float * __restrict__ cor_out,
                           complex float * __restrict__ cor_out2,
                           int _p, int tid, int dim0, int dim1)
{
  int nblk0 = dim0 / BLK;
  int nblk1 = _p / BLK;
  for(int cc = 0 ; cc < nblk1 ; cc++) {
    for(int bb = 0 ; bb < nblk0 ; bb++) {
      /* rotate tile order by thread id */
      int mine = (bb+tid)%nblk0;
      int b = mine * BLK;
      int c = cc * BLK;
      complex float buf1[BLK*BLK];
      complex float buf2[BLK*BLK];
      /* gather tile into buf1 */
      for(int i = 0 ; i < BLK ; i++) {
        #pragma simd
        for(int j = 0 ; j < BLK ; j++) {
          buf1[j + i*BLK] = cor_out[b + j + (c+i)*dim0];
        }
      }
      TransposeBLKxBLK(buf1, buf2);
      /* scatter transposed tile */
      for(int i = 0 ; i < BLK ; i++) {
        #pragma simd
        for(int j = 0 ; j < BLK ; j++) {
          cor_out2[c + j + (b+i)*dim1] = buf2[j + i*BLK];
        }
      }
    }
  }
  /* remainder columns (those beyond the last full BLK of _p) */
  for(int cc = nblk1*BLK ; cc < _p ; cc++) {
    #pragma simd
    for(int i = 0 ; i < dim0 ; i++) {
      cor_out2[cc + i*dim1] = cor_out[i + cc*dim0];
    }
  }
  // Do extra columns
  for(int bb = nblk0*BLK ; bb < dim0 ; bb++) {
    for(int cc = 0 ; cc < _p ; cc++) {
      cor_out2[cc + bb*dim1] = cor_out[bb + cc*dim0];
    }
  }
}

/* Apply the "normal" operator per coil/image:
 *   1) multiply src by the coil sensitivity map(s) and run a 1-D FFT along
 *      dim0 (plan1d_0), transposing into cfksp4,
 *   2) FFT along dim1 (plan1d_1), multiply by the (symmetric) stkern_mat
 *      kernel accumulated over image pairs, inverse FFT, transpose back,
 *   3) inverse FFT along dim0 and accumulate the conjugate-map product
 *      into dst.
 * Each pass divides rows statically across `nthr` OpenMP threads.
 * `sc = 1/sqrt(dim0*dim1)` is applied once in the first pass and once in the
 * last — presumably compensating unnormalized MKL DFTI transforms (TODO
 * confirm against the descriptor scale settings).
 * cfksp3/cfksp4 are caller-provided scratch of ncfimg*dim0*dim1 each;
 * nmaps is restricted to 1 or 2 by the assert below. */
void jtmodel_normal_benchmark_fast_parallel(
    const complex float * __restrict__ sens,
    const float * __restrict__ stkern_mat,
    complex float * dst, const complex float * src,
    const unsigned long dim0, const unsigned long dim1,
    const unsigned long ncoils, const unsigned long nmaps,
    const unsigned long ncfimg,
    DFTI_DESCRIPTOR_HANDLE plan1d_0,
    DFTI_DESCRIPTOR_HANDLE plan1d_1,
    complex float * cfksp3, complex float * cfksp4)
{
  struct timeval start, end;  /* NOTE(review): never used in this function */
  int nthr = omp_get_max_threads();
  /* rows per thread for the dim1- and dim0-partitioned passes */
  int P = (dim1 + nthr-1) / nthr;
  int P0 = (dim0 + nthr-1) / nthr;
  float sc = 1.0 / sqrt((double)dim0 * dim1);
  assert(nmaps == 1 || nmaps == 2);

  for(int coil = 0 ; coil < ncoils ; coil++) {
    /* --- pass 1: sensitivity multiply + FFT along dim0, transpose --- */
    #pragma omp parallel num_threads(nthr)
    {
      int tid = omp_get_thread_num();
      int row_start = tid * P;
      int row_end = (tid+1) * P;
      if(row_end > dim1) row_end = dim1;
      for(int cfimg = 0 ; cfimg < ncfimg ; cfimg++) {
        for(int row = row_start ; row < row_end; row++) {
          const complex float *map0 = sens + coil * dim1 * dim0 + dim0 * row;
          const complex float *map1 = NULL;
          if (nmaps == 2)
            map1 = sens + coil * dim1 * dim0 + ncoils * dim0 * dim1 + dim0 * row;
          const complex float *cfimg0 = src + cfimg * dim0 * dim1 * nmaps + dim0 * row;
          const complex float *cfimg1 = NULL;
          if (nmaps == 2)
            cfimg1 = src + dim0 * dim1 + cfimg * dim0 * dim1 * nmaps + dim0 * row;
          complex float *cor_out = cfksp3 + cfimg * dim1 * dim0 + dim0 * row;
          #pragma simd
          for (int i = 0; i < dim0; i++) {
            if (nmaps == 2)
              cor_out[i] = (map0[i] * cfimg0[i] + map1[i] * cfimg1[i]) * sc;
            else
              cor_out[i] = (map0[i] * cfimg0[i]) * sc;
          }
          /* in-place 1-D FFT of this row */
          DftiComputeForward(plan1d_0, cor_out, cor_out);
        }
        /* transpose this thread's row panel into cfksp4 (dim1-major) */
        complex float *cor_out = cfksp3 + cfimg * dim1 * dim0 + dim0 * row_start;
        complex float *cor_out2 = cfksp4 + cfimg * dim1 * dim0 + row_start;
        TransposePanel(cor_out, cor_out2, row_end-row_start, tid, dim0, dim1);
      }
    }
    /* --- pass 2: FFT along dim1, kernel multiply, inverse FFT, transpose
     * back.  Now partitioned over dim0 rows of the transposed data. --- */
    #pragma omp parallel num_threads(nthr)
    {
      int tid = omp_get_thread_num();
      int row_start = tid * P0;
      int row_end = (tid+1) * P0;
      if(row_end > dim0) row_end = dim0;
      /* per-thread accumulator: one dim1 row per cf image */
      complex float * stkern_tmp = (complex float*) malloc(dim1 * ncfimg * sizeof(complex float));
      for (int row = row_start ; row < row_end ; row++) {
        for(int cfimg = 0 ; cfimg < ncfimg ; cfimg++) {
          complex float *cor_out = cfksp4 + cfimg * dim1 * dim0 + dim1 * row;
          DftiComputeForward(plan1d_1, cor_out, cor_out);
        }
        for(int cfimg_i = 0 ; cfimg_i < ncfimg ; cfimg_i++) {
          complex float *tmp = stkern_tmp + cfimg_i * dim1;
          for (int cfimg_j = 0; cfimg_j < ncfimg; cfimg_j++) {
            complex float *cfimg_in = cfksp4 + cfimg_j * dim0 * dim1 + row * dim1;
            /* kernel is stored only for ordered pairs; swap indices to read
             * the (i,j) and (j,i) entries from the same triangle */
            const float *mat = (cfimg_i > cfimg_j) ?
              stkern_mat + cfimg_i * dim1 * dim0 + cfimg_j * dim1 * dim0 * ncfimg + row * dim1 :
              stkern_mat + cfimg_j * dim1 * dim0 + cfimg_i * dim1 * dim0 * ncfimg + row * dim1;
            if(cfimg_j == 0) {
              #pragma simd
              for (int pix = 0; pix < dim1; pix++) {
                tmp[pix] = (cfimg_in[pix] * mat[pix]);
              }
            } else {
              #pragma simd
              for (int pix = 0; pix < dim1; pix++) {
                tmp[pix] += (cfimg_in[pix] * mat[pix]);
              }
            }
          }
          DftiComputeBackward(plan1d_1, tmp, tmp);
        }
        /* copy accumulated rows back over cfksp4 */
        for(int cfimg_i = 0 ; cfimg_i < ncfimg ; cfimg_i++) {
          complex float *cfimg_in = cfksp4 + cfimg_i * dim0 * dim1 + row * dim1;
          #pragma simd
          for (int pix = 0; pix < dim1; pix++) {
            cfimg_in[pix] = stkern_tmp[pix + cfimg_i*dim1];
          }
        }
      }
      free(stkern_tmp);
      /* transpose this thread's panel back into cfksp3 (dim0-major) */
      for(int cfimg_i = 0 ; cfimg_i < ncfimg ; cfimg_i++) {
        complex float *cfimg_in = cfksp4 + cfimg_i * dim0 * dim1 + row_start * dim1;
        complex float *cfimg_in2 = cfksp3 + cfimg_i * dim0 * dim1 + row_start;
        TransposePanel(cfimg_in, cfimg_in2, row_end-row_start, tid, dim1, dim0);
      }
    }
    /* --- pass 3: inverse FFT along dim0 and conjugate-map accumulate into
     * dst (zeroed on the first coil). --- */
    #pragma omp parallel num_threads(nthr)
    {
      int tid = omp_get_thread_num();
      int row_start = tid * P;
      int row_end = (tid+1) * P;
      if(row_end > dim1) row_end = dim1;
      for (int row = row_start ; row < row_end ; row++) {
        for (int cfimg = 0; cfimg < ncfimg; cfimg++) {
          const complex float *map0 = sens + coil*dim1*dim0 + row * dim0;
          const complex float *map1 = NULL;
          if (nmaps == 2)
            map1 = sens + coil*dim1*dim0 + ncoils *dim0 * dim1 + row * dim0;
          complex float *cor0 = dst + cfimg *dim1*dim0*nmaps + row * dim0;
          complex float* cor1 = NULL;
          if (nmaps == 2)
            cor1 = dst + dim1*dim0+cfimg*dim1*dim0*nmaps + row * dim0;
          complex float *cfimg_in = cfksp3 + cfimg*dim0*dim1 + row * dim0;
          DftiComputeBackward(plan1d_0, cfimg_in, cfimg_in);
          if(coil == 0) {
            #pragma simd
            for (int i = 0; i < dim0; i++) {
              cor0[i] = 0;
              if (nmaps == 2) cor1[i] = 0;
            }
          }
          /* accumulate conj(map) * data: the real/imag expansion below is
           * exactly (r - i*I)*(_r + _i*I), i.e. a conjugate multiply */
          #pragma simd
          for (int i = 0; i < dim0; i++) {
            float r0 = __real__ map0[i];
            float i0 = __imag__ map0[i];
            float r1 = 0;
            float i1 = 0;
            if (nmaps == 2) {
              r1 = __real__ map1[i];
              i1 = __imag__ map1[i];
            }
            float _r = __real__ cfimg_in[i];
            float _i = __imag__ cfimg_in[i];
            cor0[i] += ((r0 * _r + i0 * _i) + (r0 * _i - i0 * _r) * _Complex_I) * sc;
            if (nmaps == 2)
              cor1[i] += ((r1 * _r + i1 * _i) + (r1 * _i - i1 * _r) * _Complex_I) * sc;
          }
        }
      }
    }
  }
}

/* Adjoint operator: for every coil/image, inverse 2-D FFT of the k-space
 * data (plan2d, out-of-place into scratch cfksp3) followed by a conjugate
 * sensitivity-map multiply accumulated into dst.  dst is zeroed on the
 * first coil.  nmaps is restricted to 1 or 2.
 * NOTE(review): `src` is cast away from const but is only read here (it is
 * passed as the DFTI input buffer).
 * NOTE(review): each `#pragma omp parallel for` is separated from its loop
 * by a `#pragma simd`; whether the combination is accepted depends on the
 * (Intel) compiler — confirm it still parallelizes as intended. */
void jtmodel_adjoint_benchmark_fast_parallel(
    const complex float * __restrict__ sens,
    complex float * dst, const complex float * src,
    const unsigned long dim0, const unsigned long dim1,
    const unsigned long ncoils, const unsigned long nmaps,
    const unsigned long ncfimg,
    DFTI_DESCRIPTOR_HANDLE plan2d,
    complex float * cfksp3)
{
  assert(nmaps == 1 || nmaps == 2);
  float sc = 1.0 / sqrt((double)dim0 * dim1);
  for(int coil = 0 ; coil < ncoils ; coil++) {
    const complex float * map0 = sens + coil * dim0 * dim1;
    const complex float * map1 = NULL;
    if (nmaps == 2)
      map1 = sens + coil * dim0 * dim1 + ncoils * dim0*dim1;
    for(int cfimg = 0 ; cfimg < ncfimg ; cfimg++) {
      complex float * ksp = (complex float*)src + coil*dim0*dim1 + cfimg*ncoils*dim0*dim1;
      /* out-of-place inverse 2-D FFT into scratch */
      DftiComputeBackward(plan2d, ksp, cfksp3);
      complex float * cor0 = dst + nmaps * cfimg * dim0 * dim1;
      complex float * cor1 = NULL;
      if (nmaps == 2)
        cor1 = dst + nmaps * cfimg * dim0 * dim1 + dim0*dim1;
      if(coil == 0) {
        #pragma omp parallel for
        #pragma simd
        for (int i = 0; i < dim0*dim1; i++) {
          cor0[i] = 0;
          if (nmaps == 2) cor1[i] = 0;
        }
      }
      /* accumulate conj(map) * image (same conjugate-multiply expansion as
       * in the normal operator above) */
      #pragma omp parallel for
      #pragma simd
      for (int i = 0; i < dim0*dim1; i++) {
        float r0 = __real__ map0[i];
        float i0 = __imag__ map0[i];
        float r1 = 0.;
        float i1 = 0;
        if (nmaps == 2) {
          r1 = __real__ map1[i];
          i1 = __imag__ map1[i];
        }
        float _r = __real__ cfksp3[i];
        float _i = __imag__ cfksp3[i];
        cor0[i] += ((r0 * _r + i0 * _i) + (r0 * _i - i0 * _r) * _Complex_I) * sc;
        if (nmaps == 2)
          cor1[i] += ((r1 * _r + i1 * _i) + (r1 * _i - i1 * _r) * _Complex_I) * sc;
      }
    }
  }
}
mttkrp.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "base.h"
#include "mttkrp.h"
#include "thd_info.h"
#include "tile.h"
#include "util.h"
#include "splatt_debug.h"

/* Check runtime within mttkrp routine */
#include "timer.h"

#include "mutex_pool.h"

/* Lock pool shared by all locked MTTKRP kernels in this file.
 * XXX: this is a memory leak until cpd_ws is added/freed. */
static mutex_pool * pool = NULL;

/**
* @brief Function pointer that performs MTTKRP on a tile of a CSF tree.
*
* @param ct The CSF tensor.
* @param tile_id The tile to process.
* @param mats The matrices.
* @param mode The output mode.
* @param thds Thread structures.
* @param partition A partitioning of the slices in the tensor, to distribute
*                  to threads. Use the thread ID to decide which slices to
*                  process. This may be NULL, in that case simply process all
*                  slices.
*/
typedef void (* csf_mttkrp_func)(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const partition);

/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/

/**
* @brief Perform a reduction on thread-local MTTKRP outputs.
*        Must be called from inside an OpenMP parallel region by every
*        thread: each thread sums its contiguous share of the flattened
*        (nrows x ncols) output over all threads' private buffers.
*
* @param ws MTTKRP workspace containing thread-local outputs.
* @param global_output The global MTTKRP output we are reducing into.
* @param nrows The number of rows in the MTTKRP.
* @param ncols The number of columns in the MTTKRP.
*/
static void p_reduce_privatized(
    splatt_mttkrp_ws * const ws,
    val_t * const restrict global_output,
    idx_t const nrows,
    idx_t const ncols)
{
  /* Ensure everyone has completed their local MTTKRP. */
  #pragma omp barrier

  sp_timer_t reduction_timer;
  timer_fstart(&reduction_timer);

  int const tid = splatt_omp_get_thread_num();
  idx_t const num_threads = splatt_omp_get_num_threads();
  /* static split of the flat output; the last thread absorbs the remainder
   * of the truncating division */
  idx_t const elem_per_thread = (nrows * ncols) / num_threads;
  idx_t const start = tid * elem_per_thread;
  idx_t const stop = ((idx_t)tid == num_threads-1) ?
      (nrows * ncols) : (tid + 1) * elem_per_thread;

  /* reduction: each thread owns [start, stop) of the output, so no locking
   * is needed while summing over every thread's private buffer */
  for(idx_t t=0; t < num_threads; ++t){
    val_t const * const restrict thread_buf = ws->privatize_buffer[t];
    for(idx_t x=start; x < stop; ++x) {
      global_output[x] += thread_buf[x];
    }
  }

  timer_stop(&reduction_timer);
  /* only one thread records the timing */
  #pragma omp master
  ws->reduction_time = reduction_timer.seconds;
}

/**
* @brief Map MTTKRP functions onto a (possibly tiled) CSF tensor. This
*        function will handle any scheduling required with a partially
*        tiled tensor.
*
* @param tensors An array of CSF representations. tensors[csf_id] is processed.
* @param csf_id Which tensor are we processing?
* @param atomic_func An MTTKRP function which atomically updates the output.
* @param nosync_func An MTTKRP function which does not atomically update.
* @param mats The matrices, with the output stored in mats[MAX_NMODES].
* @param mode Which mode of 'tensors' is the output (not CSF depth).
* @param thds Thread structures.
* @param ws MTTKRP workspace.
*/
static void p_schedule_tiles(
    splatt_csf const * const tensors,
    idx_t const csf_id,
    csf_mttkrp_func atomic_func,
    csf_mttkrp_func nosync_func,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    splatt_mttkrp_ws * const ws)
{
  splatt_csf const * const csf = &(tensors[csf_id]);
  idx_t const nmodes = csf->nmodes;
  idx_t const depth = nmodes - 1;  /* NOTE(review): unused in this function */
  idx_t const nrows = mats[mode]->I;
  idx_t const ncols = mats[mode]->J;

  /* Store old pointer */
  val_t * const restrict global_output = mats[MAX_NMODES]->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    timer_start(&thds[tid].ttime);

    idx_t const * const tile_partition = ws->tile_partition[csf_id];
    idx_t const * const tree_partition = ws->tree_partition[csf_id];

    /*
     * We may need to edit mats[MAX_NMODES]->vals, so create a private copy of
     * the pointers to edit. (NOT actual factors).
     */
    matrix_t * mats_priv[MAX_NMODES+1];
    for(idx_t m=0; m < MAX_NMODES; ++m) {
      mats_priv[m] = mats[m];
    }
    /* each thread gets separate structure, but do a shallow copy */
    mats_priv[MAX_NMODES] = splatt_malloc(sizeof(**mats_priv));
    *(mats_priv[MAX_NMODES]) = *(mats[MAX_NMODES]);

    /* Give each thread its own private buffer and overwrite atomic
     * function. */
    if(ws->is_privatized[mode]) {
      /* change (thread-private!) output structure */
      memset(ws->privatize_buffer[tid],
             0, nrows * ncols * sizeof(**(ws->privatize_buffer)));
      mats_priv[MAX_NMODES]->vals = ws->privatize_buffer[tid];

      /* Don't use atomics if we privatized. */
      atomic_func = nosync_func;
    }

    /*
     * Distribute tiles to threads in some fashion.
     */
    if(csf->ntiles > 1) {
      /* We parallelize across tiles, and thus should not distribute within a
       * tree. This may change if we instead 'split' tiles across a few
       * threads. */
      assert(tree_partition == NULL);

      /* mode is actually tiled -- avoid synchronization: threads own whole
       * layers of tiles along the output mode, so writes cannot collide */
      if(csf->tile_dims[mode] > 1) {
        idx_t tile_id = 0;
        /* foreach layer of tiles */
        #pragma omp for schedule(dynamic, 1) nowait
        for(idx_t t=0; t < csf->tile_dims[mode]; ++t) {
          tile_id = get_next_tileid(TILE_BEGIN, csf->tile_dims, nmodes,
              mode, t);
          while(tile_id != TILE_END) {
            nosync_func(csf, tile_id, mats_priv, mode, thds, tree_partition);
            tile_id = get_next_tileid(tile_id, csf->tile_dims, nmodes,
                mode, t);
          }
        }

      /* tiled, but not this mode. Atomics are still necessary. */
      } else {
        for(idx_t tile_id = tile_partition[tid];
            tile_id < tile_partition[tid+1]; ++tile_id) {
          atomic_func(csf, tile_id, mats_priv, mode, thds, tree_partition);
        }
      }

    /*
     * Untiled, parallelize within kernel.
     */
    } else {
      assert(tree_partition != NULL);
      atomic_func(csf, 0, mats_priv, mode, thds, tree_partition);
    }

    timer_stop(&thds[tid].ttime);

    /* If we used privatization, perform a reduction. */
    if(ws->is_privatized[mode]) {
      p_reduce_privatized(ws, global_output, nrows, ncols);
    }

    splatt_free(mats_priv[MAX_NMODES]);
  } /* end omp parallel */

  /* restore pointer */
  mats[MAX_NMODES]->vals = global_output;
}

/**
* @brief Should a certain mode be privatized to avoid locks?
*
* @param csf The tensor (just used for dimensions).
* @param mode The mode we are processing.
* @param opts Options, storing the # threads and the threshold.
*
* @return true, if we should privatize.
*/
static bool p_is_privatized(
    splatt_csf const * const csf,
    idx_t const mode,
    double const * const opts)
{
  idx_t const length = csf->dims[mode];
  idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
  double const thresh = opts[SPLATT_OPTION_PRIVTHRESH];

  /* don't bother if it is not multithreaded.
 */
  if(nthreads == 1) {
    return false;
  }

  /* privatize when the total private-buffer footprint (rows * threads) is
   * small relative to nnz, per the user-supplied threshold */
  return (double)(length * nthreads) <= (thresh * (double)csf->nnz);
}

/* out[f] += a[f] * b[f], clearing a[] for reuse by the next sibling. */
static inline void p_add_hada_clear(
  val_t * const restrict out,
  val_t * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] += a[f] * b[f];
    a[f] = 0;
  }
}

/* out[f] = a[f] * b[f] (elementwise/Hadamard product of two rows). */
static inline void p_assign_hada(
  val_t * const restrict out,
  val_t const * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] = a[f] * b[f];
  }
}

/* Scatter accumbuf scaled by each nonzero into the leaf-mode factor rows,
 * guarding each row update with the shared mutex pool. */
static inline void p_csf_process_fiber_lock(
    val_t * const leafmat,
    val_t const * const restrict accumbuf,
    idx_t const nfactors,
    idx_t const start,
    idx_t const end,
    idx_t const * const restrict inds,
    val_t const * const restrict vals)
{
  for(idx_t jj=start; jj < end; ++jj) {
    val_t * const restrict leafrow = leafmat + (inds[jj] * nfactors);
    val_t const v = vals[jj];
    mutex_set_lock(pool, inds[jj]);
    for(idx_t f=0; f < nfactors; ++f) {
      leafrow[f] += v * accumbuf[f];
    }
    mutex_unset_lock(pool, inds[jj]);
  }
}

/* Same scatter as above but without locking — caller must guarantee rows
 * are not shared between threads (tiling or privatization). */
static inline void p_csf_process_fiber_nolock(
    val_t * const leafmat,
    val_t const * const restrict accumbuf,
    idx_t const nfactors,
    idx_t const start,
    idx_t const end,
    idx_t const * const restrict inds,
    val_t const * const restrict vals)
{
  for(idx_t jj=start; jj < end; ++jj) {
    val_t * const restrict leafrow = leafmat + (inds[jj] * nfactors);
    val_t const v = vals[jj];
    for(idx_t f=0; f < nfactors; ++f) {
      leafrow[f] += v * accumbuf[f];
    }
  }
}

/* Gather: accumbuf[f] += sum over nnz of vals[j] * leaf-row[f]. */
static inline void p_csf_process_fiber(
    val_t * const restrict accumbuf,
    idx_t const nfactors,
    val_t const * const leafmat,
    idx_t const start,
    idx_t const end,
    idx_t const * const inds,
    val_t const * const vals)
{
  /* foreach nnz in fiber */
  for(idx_t j=start; j < end; ++j) {
    val_t const v = vals[j] ;
    val_t const * const restrict row = leafmat + (nfactors * inds[j]);
    for(idx_t f=0; f < nfactors; ++f) {
      accumbuf[f] += v * row[f];
    }
  }
}

/* Depth-first accumulation from the leaves of one subtree up to
 * buf[init_depth+1], writing the final row into `out`.  idxstack tracks the
 * current fiber index at each depth; buf holds one partial row per depth. */
static inline void p_propagate_up(
    val_t * const out,
    val_t * const * const buf,
    idx_t * const restrict idxstack,
    idx_t const init_depth,
    idx_t const init_idx,
    idx_t const * const * const fp,
    idx_t const * const * const fids,
    val_t const * const restrict vals,
    val_t ** mvals,
    idx_t const nmodes,
    idx_t const nfactors)
{
  /* push initial idx initialize idxstack */
  idxstack[init_depth] = init_idx;
  for(idx_t m=init_depth+1; m < nmodes; ++m) {
    idxstack[m] = fp[m-1][idxstack[m-1]];
  }

  assert(init_depth < nmodes-1);

  /* clear out accumulation buffer */
  for(idx_t f=0; f < nfactors; ++f) {
    buf[init_depth+1][f] = 0;
  }

  while(idxstack[init_depth+1] < fp[init_depth][init_idx+1]) {
    /* skip to last internal mode */
    idx_t depth = nmodes - 2;

    /* process all nonzeros [start, end) into buf[depth]*/
    idx_t const start = fp[depth][idxstack[depth]];
    idx_t const end = fp[depth][idxstack[depth]+1];
    p_csf_process_fiber(buf[depth+1], nfactors, mvals[depth+1],
        start, end, fids[depth+1], vals);

    idxstack[depth+1] = end;

    /* exit early if there is no propagation to do... */
    if(init_depth == nmodes-2) {
      for(idx_t f=0; f < nfactors; ++f) {
        out[f] = buf[depth+1][f];
      }
      return;
    }

    /* Propagate up until we reach a node with more children to process */
    do {
      /* propagate result up and clear buffer for next sibling */
      val_t const * const restrict fibrow
          = mvals[depth] + (fids[depth][idxstack[depth]] * nfactors);
      p_add_hada_clear(buf[depth], buf[depth+1], fibrow, nfactors);

      ++idxstack[depth];
      --depth;
    } while(depth > init_depth &&
        idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
  } /* end DFS */

  /* copy to out */
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] = buf[init_depth+1][f];
  }
}

/* 3-mode specialization of the root-mode MTTKRP without output locking
 * (safe when slices are partitioned so output rows are thread-private). */
static void p_csf_mttkrp_root_nolock3(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const *
const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* factors for CSF depths 1 and 2; output rows are indexed by depth 0 */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
  /* write to output */
  val_t * const restrict writeF = (val_t *) thds[tid].scratch[2];
  for(idx_t r=0; r < nfactors; ++r) {
    writeF[r] = 0.;
  }

  /* break up loop by partition */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    /* sids == NULL means slice ids are the identity mapping */
    idx_t const fid = (sids == NULL) ? s : sids[s];

    val_t * const restrict mv = ovals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        writeF[r] += accumF[r] * av[r];
      }
    } /* foreach fiber */

    /* flush to output (no lock — rows are thread-private here) */
    for(idx_t r=0; r < nfactors; ++r) {
      mv[r] += writeF[r];
      writeF[r] = 0.;
    }
  } /* foreach slice (tree) */
}

/* 3-mode root-mode MTTKRP that locks the output row before flushing —
 * needed when multiple threads may update the same slice id. */
static void p_csf_mttkrp_root3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);

  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
  /* write to output */
  val_t * const restrict writeF = (val_t *) thds[tid].scratch[2];
  for(idx_t r=0; r < nfactors; ++r) {
    writeF[r] = 0.;
  }

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        writeF[r] += accumF[r] * av[r];
      }
    }

    idx_t const fid = (sids == NULL) ?
s : sids[s];
    val_t * const restrict mv = ovals + (fid * nfactors);

    /* flush to output, guarded because another thread may own this row */
    mutex_set_lock(pool, fid);
    for(idx_t r=0; r < nfactors; ++r) {
      mv[r] += writeF[r];
      writeF[r] = 0.;
    }
    mutex_unset_lock(pool, fid);
  }
}

/* 3-mode MTTKRP where the output is the internal (depth-1) mode; fiber-row
 * updates are locked since fiber ids may be shared between threads. */
static void p_csf_mttkrp_intl3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);

  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* factors for the root (depth 0) and leaf (depth 2) modes */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* write to fiber row */
      val_t * const restrict ov = ovals + (fids[f] * nfactors);
      mutex_set_lock(pool, fids[f]);
      for(idx_t r=0; r < nfactors; ++r) {
        ov[r] += rv[r] * accumF[r];
      }
      mutex_unset_lock(pool, fids[f]);
    }
  }
}

/* 3-mode MTTKRP where the output is the leaf (depth-2) mode; each nonzero
 * scatters into its own output row under a lock. */
static void p_csf_mttkrp_leaf3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);

  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ?
partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* fill fiber with hada */
      val_t const * const restrict av = bvals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = rv[r] * av[r];
      }

      /* foreach nnz in fiber, scale with hada and write to ovals */
      for(idx_t jj=fptr[f]; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t * const restrict ov = ovals + (inds[jj] * nfactors);
        mutex_set_lock(pool, inds[jj]);
        for(idx_t r=0; r < nfactors; ++r) {
          ov[r] += v * accumF[r];
        }
        mutex_unset_lock(pool, inds[jj]);
      }
    }
  }
}

/* General root-mode MTTKRP; dispatches to the 3-mode specialization and
 * otherwise walks each slice subtree with p_propagate_up.
 * NOTE(review): despite the "_nolock" name, the general (nmodes > 3) path
 * below locks the output row before flushing — only the 3-mode
 * specialization is lock-free. Confirm this asymmetry is intentional. */
static void p_csf_mttkrp_root_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_root_nolock3(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);

  /* break up loop by partition */
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nfibs;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    assert(fid < mats[MAX_NMODES]->I);

    /* reduce the whole subtree rooted at slice s into buf[0] */
    p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids,
        vals, mvals, nmodes, nfactors);

    val_t * const restrict orow = ovals + (fid * nfactors);
    val_t const * const restrict obuf = buf[0];
    mutex_set_lock(pool, fid);
    for(idx_t f=0; f < nfactors; ++f) {
      orow[f] += obuf[f];
    }
    mutex_unset_lock(pool, fid);
  } /* end foreach outer slice */
}

/* General root-mode MTTKRP with locked output-row updates. */
static void p_csf_mttkrp_root_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_root3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);

  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nfibs;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ?
s : fids[0][s];
    assert(fid < mats[MAX_NMODES]->I);

    /* reduce the whole subtree rooted at slice s into buf[0] */
    p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids,
        vals, mvals, nmodes, nfactors);

    val_t * const restrict orow = ovals + (fid * nfactors);
    val_t const * const restrict obuf = buf[0];
    mutex_set_lock(pool, fid);
    for(idx_t f=0; f < nfactors; ++f) {
      orow[f] += obuf[f];
    }
    mutex_unset_lock(pool, fid);
  } /* end foreach outer slice */
}

/* 3-mode leaf-mode MTTKRP without locks (output rows must be
 * thread-private via tiling/privatization). */
static void p_csf_mttkrp_leaf_nolock3(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  assert(ct->nmodes == 3);

  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ?
s : fids[0][s];
    idxstack[0] = s;

    /* clear out stale data */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }

    idx_t depth = 0;

    idx_t const outer_end = fp[0][s+1];
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propogate buf down */
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* process all nonzeros [start, end) */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_nolock(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 &&
          idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}

/* General leaf-mode MTTKRP with locked scatter into the output rows;
 * dispatches to the 3-mode specialization when possible. */
static void p_csf_mttkrp_leaf_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;

  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_leaf3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
  }

  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    idxstack[0] = s;

    /* clear out stale data */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }

    idx_t depth = 0;
    idx_t const outer_end = fp[0][s+1];
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propogate buf down */
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* process all nonzeros [start, end) */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_lock(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 &&
          idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}

/* 3-mode MTTKRP for the internal mode without locking (safe under
 * tiling/privatization). */
static void p_csf_mttkrp_intl_nolock3(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  assert(ct->nmodes == 3);

  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const *
const restrict inds = ct->pt[tile_id].fids[2]; val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals; val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals; val_t * const ovals = mats[MAX_NMODES]->vals; idx_t const nfactors = mats[MAX_NMODES]->J; int const tid = splatt_omp_get_thread_num(); val_t * const restrict accumF = (val_t *) thds[tid].scratch[0]; idx_t const nslices = ct->pt[tile_id].nfibs[0]; idx_t const start = (partition != NULL) ? partition[tid] : 0; idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices; for(idx_t s=start; s < stop; ++s) { idx_t const fid = (sids == NULL) ? s : sids[s]; /* root row */ val_t const * const restrict rv = avals + (fid * nfactors); /* foreach fiber in slice */ for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) { /* first entry of the fiber is used to initialize accumF */ idx_t const jjfirst = fptr[f]; val_t const vfirst = vals[jjfirst]; val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accumF[r] = vfirst * bv[r]; } /* foreach nnz in fiber */ for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) { val_t const v = vals[jj]; val_t const * const restrict bv = bvals + (inds[jj] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accumF[r] += v * bv[r]; } } /* write to fiber row */ val_t * const restrict ov = ovals + (fids[f] * nfactors); for(idx_t r=0; r < nfactors; ++r) { ov[r] += rv[r] * accumF[r]; } } } } static void p_csf_mttkrp_intl_nolock( splatt_csf const * const ct, idx_t const tile_id, matrix_t ** mats, idx_t const mode, thd_info * const thds, idx_t const * const partition) { /* extract tensor structures */ idx_t const nmodes = ct->nmodes; val_t const * const vals = ct->pt[tile_id].vals; /* pass empty tiles */ if(vals == NULL) { return; } if(nmodes == 3) { p_csf_mttkrp_intl_nolock3(ct, tile_id, mats, mode, thds, partition); return; } idx_t const * const * const restrict fp = (idx_t const * const *) ct->pt[tile_id].fptr; idx_t const * const * const 
restrict fids = (idx_t const * const *) ct->pt[tile_id].fids; idx_t const nfactors = mats[0]->J; /* find out which level in the tree this is */ idx_t const outdepth = csf_mode_to_depth(ct, mode); val_t * mvals[MAX_NMODES]; val_t * buf[MAX_NMODES]; idx_t idxstack[MAX_NMODES]; int const tid = splatt_omp_get_thread_num(); for(idx_t m=0; m < nmodes; ++m) { mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals; /* grab the next row of buf from thds */ buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m); memset(buf[m], 0, nfactors * sizeof(val_t)); } val_t * const ovals = mats[MAX_NMODES]->vals; /* foreach outer slice */ idx_t const nslices = ct->pt[tile_id].nfibs[0]; idx_t const start = (partition != NULL) ? partition[tid] : 0; idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices; for(idx_t s=start; s < stop; ++s) { idx_t const fid = (fids[0] == NULL) ? s : fids[0][s]; /* push outer slice and fill stack */ idxstack[0] = s; for(idx_t m=1; m <= outdepth; ++m) { idxstack[m] = fp[m-1][idxstack[m-1]]; } /* fill first buf */ val_t const * const restrict rootrow = mvals[0] + (fid*nfactors); for(idx_t f=0; f < nfactors; ++f) { buf[0][f] = rootrow[f]; } /* process entire subtree */ idx_t depth = 0; while(idxstack[1] < fp[0][s+1]) { /* propagate values down to outdepth-1 */ for(; depth < outdepth; ++depth) { val_t const * const restrict drow = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors); p_assign_hada(buf[depth+1], buf[depth], drow, nfactors); } /* write to output and clear buf[outdepth] for next subtree */ idx_t const noderow = fids[outdepth][idxstack[outdepth]]; /* propagate value up to buf[outdepth] */ p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth], fp, fids, vals, mvals, nmodes, nfactors); val_t * const restrict outbuf = ovals + (noderow * nfactors); p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors); /* backtrack to next unfinished node */ do { ++idxstack[depth]; --depth; } while(depth > 0 && 
idxstack[depth+1] == fp[depth][idxstack[depth]+1]); } /* end DFS */ } /* end foreach outer slice */ } static void p_csf_mttkrp_intl_locked( splatt_csf const * const ct, idx_t const tile_id, matrix_t ** mats, idx_t const mode, thd_info * const thds, idx_t const * const partition) { /* extract tensor structures */ idx_t const nmodes = ct->nmodes; val_t const * const vals = ct->pt[tile_id].vals; /* pass empty tiles */ if(vals == NULL) { return; } if(nmodes == 3) { p_csf_mttkrp_intl3_locked(ct, tile_id, mats, mode, thds, partition); return; } idx_t const * const * const restrict fp = (idx_t const * const *) ct->pt[tile_id].fptr; idx_t const * const * const restrict fids = (idx_t const * const *) ct->pt[tile_id].fids; idx_t const nfactors = mats[0]->J; /* find out which level in the tree this is */ idx_t const outdepth = csf_mode_to_depth(ct, mode); val_t * mvals[MAX_NMODES]; val_t * buf[MAX_NMODES]; idx_t idxstack[MAX_NMODES]; int const tid = splatt_omp_get_thread_num(); for(idx_t m=0; m < nmodes; ++m) { mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals; /* grab the next row of buf from thds */ buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m); memset(buf[m], 0, nfactors * sizeof(val_t)); } val_t * const ovals = mats[MAX_NMODES]->vals; /* foreach outer slice */ idx_t const nslices = ct->pt[tile_id].nfibs[0]; idx_t const start = (partition != NULL) ? partition[tid] : 0; idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices; for(idx_t s=start; s < stop; ++s) { idx_t const fid = (fids[0] == NULL) ? 
s : fids[0][s]; /* push outer slice and fill stack */ idxstack[0] = s; for(idx_t m=1; m <= outdepth; ++m) { idxstack[m] = fp[m-1][idxstack[m-1]]; } /* fill first buf */ val_t const * const restrict rootrow = mvals[0] + (fid*nfactors); for(idx_t f=0; f < nfactors; ++f) { buf[0][f] = rootrow[f]; } /* process entire subtree */ idx_t depth = 0; while(idxstack[1] < fp[0][s+1]) { /* propagate values down to outdepth-1 */ for(; depth < outdepth; ++depth) { val_t const * const restrict drow = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors); p_assign_hada(buf[depth+1], buf[depth], drow, nfactors); } /* write to output and clear buf[outdepth] for next subtree */ idx_t const noderow = fids[outdepth][idxstack[outdepth]]; /* propagate value up to buf[outdepth] */ p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth], fp, fids, vals, mvals, nmodes, nfactors); val_t * const restrict outbuf = ovals + (noderow * nfactors); mutex_set_lock(pool, noderow); p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors); mutex_unset_lock(pool, noderow); /* backtrack to next unfinished node */ do { ++idxstack[depth]; --depth; } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]); } /* end DFS */ } /* end foreach outer slice */ } /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ void mttkrp_csf( splatt_csf const * const tensors, matrix_t ** mats, idx_t const mode, thd_info * const thds, splatt_mttkrp_ws * const ws, splatt_global_opts const * const global_opts) { /* ensure we use as many threads as our partitioning supports */ splatt_omp_set_num_threads(ws->num_threads); if(pool == NULL) { pool = mutex_alloc(); } /* clear output matrix */ matrix_t * const M = mats[MAX_NMODES]; M->I = tensors[0].dims[mode]; memset(M->vals, 0, M->I * M->J * sizeof(val_t)); idx_t const nmodes = tensors[0].nmodes; /* reset thread 
times */
  thd_reset(thds, splatt_omp_get_max_threads());

  /* choose which MTTKRP function to use */
  idx_t const which_csf = ws->mode_csf_map[mode];
  idx_t const outdepth = csf_mode_to_depth(&(tensors[which_csf]), mode);
  if(outdepth == 0) {
    /* root */
    p_schedule_tiles(tensors, which_csf, p_csf_mttkrp_root_locked, p_csf_mttkrp_root_nolock, mats, mode, thds, ws);
  } else if(outdepth == nmodes - 1) {
    /* leaf */
    p_schedule_tiles(tensors, which_csf, p_csf_mttkrp_leaf_locked, p_csf_mttkrp_leaf_nolock, mats, mode, thds, ws);
  } else {
    /* internal */
    p_schedule_tiles(tensors, which_csf, p_csf_mttkrp_intl_locked, p_csf_mttkrp_intl_nolock, mats, mode, thds, ws);
  }

#if 0
  /* print thread times, if requested */
  if(global_opts->verbosity == SPLATT_VERBOSITY_MAX) {
    printf("MTTKRP mode %"SPLATT_PF_IDX": ", mode+1);
    thd_time_stats(thds, splatt_omp_get_max_threads());
    if(ws->is_privatized[mode]) {
      printf("  reduction-time: %0.3fs\n", ws->reduction_time);
    }
  }
#endif
  thd_reset(thds, splatt_omp_get_max_threads());
}


/******************************************************************************
 * DEPRECATED FUNCTIONS
 *****************************************************************************/

/******************************************************************************
 * SPLATT MTTKRP
 *****************************************************************************/

/* MTTKRP over the (deprecated) fiber-tensor layout. Dispatches to the
 * sync-tiled / coop-tiled variants when the tensor was tiled; otherwise
 * parallelizes over slices, accumulating fiber inner products in the
 * per-thread accumF scratch buffer. Each slice owns its output row, so
 * no synchronization is needed on the writes to M. */
void mttkrp_splatt(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  if(ft->tiled == SPLATT_SYNCTILE) {
    mttkrp_splatt_sync_tiled(ft, mats, mode, thds, nthreads);
    return;
  }
  if(ft->tiled == SPLATT_COOPTILE) {
    mttkrp_splatt_coop_tiled(ft, mats, mode, thds, nthreads);
    return;
  }

  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];
  idx_t const nslices = ft->dims[mode];
  idx_t const rank = M->J;

  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  idx_t const * const restrict sptr = ft->sptr;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    timer_start(&thds[tid].ttime);

    #pragma omp for schedule(dynamic, 16) nowait
    for(idx_t s=0; s < nslices; ++s) {
      val_t * const restrict mv = mvals + (s * rank);

      /* foreach fiber in slice */
      for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst  = fptr[f];
        val_t const vfirst   = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
        for(idx_t r=0; r < rank; ++r) {
          accumF[r] = vfirst * bv[r];
        }

        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] += v * bv[r];
          }
        }

        /* scale inner products by row of A and update to M */
        val_t const * const restrict av = avals + (fids[f] * rank);
        for(idx_t r=0; r < rank; ++r) {
          mv[r] += accumF[r] * av[r];
        }
      }
    }

    timer_stop(&thds[tid].ttime);
  } /* end parallel region */
}


/* MTTKRP over a sync-tiled fiber tensor. Parallelizes over slabs; the
 * destination row of each fiber is looked up through sids[f], so tiling
 * must guarantee that concurrent slabs touch disjoint output rows (no
 * locking is performed here). */
void mttkrp_splatt_sync_tiled(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];

  idx_t const nslabs = ft->nslabs;
  idx_t const rank = M->J;

  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  idx_t const * const restrict slabptr = ft->slabptr;
  idx_t const * const restrict sids = ft->sids;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    timer_start(&thds[tid].ttime);

    #pragma omp for schedule(dynamic, 1) nowait
    for(idx_t s=0; s < nslabs; ++s) {
      /* foreach fiber in slice */
      for(idx_t f=slabptr[s]; f < slabptr[s+1]; ++f) {
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst  = fptr[f];
        val_t const vfirst   = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
        for(idx_t r=0; r < rank; ++r) {
          accumF[r] = vfirst * bv[r];
        }

        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] += v * bv[r];
          }
        }

        /* scale inner products by row of A and update to M */
        val_t * const restrict mv = mvals + (sids[f] * rank);
        val_t const * const restrict av = avals + (fids[f] * rank);
        for(idx_t r=0; r < rank; ++r) {
          mv[r] += accumF[r] * av[r];
        }
      }
    }

    timer_stop(&thds[tid].ttime);
  } /* end parallel region */
}


/* MTTKRP over a coop-tiled fiber tensor. Threads cooperate on each slab:
 * fibers are shared via `omp for`, partial rows accumulate into each
 * thread's private localm buffer (thds[tid].scratch[1]), and after each
 * slab the per-thread buffers are reduced into M (and zeroed) by a
 * second worksharing loop. The implicit barriers at the end of each
 * `omp for` separate the accumulate and reduce phases. */
void mttkrp_splatt_coop_tiled(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];

  idx_t const nslabs = ft->nslabs;
  idx_t const rank = M->J;

  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  idx_t const * const restrict slabptr = ft->slabptr;
  idx_t const * const restrict sptr = ft->sptr;
  idx_t const * const restrict sids = ft->sids;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    val_t * const localm = (val_t *) thds[tid].scratch[1];
    timer_start(&thds[tid].ttime);

    /* foreach slab */
    for(idx_t s=0; s < nslabs; ++s) {
      /* foreach fiber in slab */
      #pragma omp for schedule(dynamic, 8)
      for(idx_t sl=slabptr[s]; sl < slabptr[s+1]; ++sl) {
        idx_t const slice = sids[sl];
        for(idx_t f=sptr[sl]; f < sptr[sl+1]; ++f) {
          /* first entry of the fiber is used to initialize accumF */
          idx_t const jjfirst  = fptr[f];
          val_t const vfirst   = vals[jjfirst];
          val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] = vfirst * bv[r];
          }

          /* foreach nnz in fiber */
          for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
            val_t const v = vals[jj];
            val_t const * const restrict bv = bvals + (inds[jj] * rank);
            for(idx_t r=0; r < rank; ++r) {
              accumF[r] += v * bv[r];
            }
          }

          /* scale inner products by row of A and update thread-local M */
          val_t * const restrict mv = localm + ((slice % TILE_SIZES[0]) * rank);
          val_t const * const restrict av = avals + (fids[f] * rank);
          for(idx_t r=0; r < rank; ++r) {
            mv[r] += accumF[r] * av[r];
          }
        }
      }

      idx_t const start = s * TILE_SIZES[0];
      idx_t const stop = SS_MIN((s+1) * TILE_SIZES[0], ft->dims[mode]);

      #pragma omp for schedule(static)
      for(idx_t i=start; i < stop; ++i) {
        /* map i back to global slice id */
        idx_t const localrow = i % TILE_SIZES[0];
        for(idx_t t=0; t < nthreads; ++t) {
          val_t * const threadm = (val_t *) thds[t].scratch[1];
          for(idx_t r=0; r < rank; ++r) {
            mvals[r + (i*rank)] += threadm[r + (localrow*rank)];
            threadm[r + (localrow*rank)] = 0.;
          }
        }
      }
    } /* end foreach slab */

    timer_stop(&thds[tid].ttime);
  } /* end omp parallel */
}


/******************************************************************************
 * GIGA MTTKRP
 *****************************************************************************/

/* MTTKRP via the GigaTensor-style unfolded sparse-matrix (CSR) layout.
 * For each rank column: form the Hadamard product of nonzeros with the
 * matching rows of A and B into `scratch`, then row-sum scratch into the
 * corresponding column of M. Output M is column-major here (mv indexed
 * by r * I). */
void mttkrp_giga(
  spmatrix_t const * const spmat,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mode == 0 ? mats[1] : mats[0];
  matrix_t const * const B = mode == 2 ? mats[1] : mats[2];

  idx_t const I = spmat->I;
  idx_t const rank = M->J;

  idx_t const * const restrict rowptr = spmat->rowptr;
  idx_t const * const restrict colind = spmat->colind;
  val_t const * const restrict vals   = spmat->vals;

  #pragma omp parallel
  {
    for(idx_t r=0; r < rank; ++r) {
      val_t * const restrict mv =  M->vals + (r * I);
      val_t const * const restrict av = A->vals + (r * A->I);
      val_t const * const restrict bv = B->vals + (r * B->I);

      /* Joined Hadamard products of X, C, and B */
      #pragma omp for schedule(dynamic, 16)
      for(idx_t i=0; i < I; ++i) {
        for(idx_t y=rowptr[i]; y < rowptr[i+1]; ++y) {
          /* decode the flattened column index into (a, b) coordinates */
          idx_t const a = colind[y] / B->I;
          idx_t const b = colind[y] % B->I;
          scratch[y] = vals[y] * av[a] * bv[b];
        }
      }

      /* now accumulate rows into column of M1 */
      #pragma omp for schedule(dynamic, 16)
      for(idx_t i=0; i < I; ++i) {
        val_t sum = 0;
        for(idx_t y=rowptr[i]; y < rowptr[i+1]; ++y) {
          sum += scratch[y];
        }
        mv[i] = sum;
      }
    }
  }
}


/******************************************************************************
 * TTBOX MTTKRP
 *****************************************************************************/

/* MTTKRP in the Tensor-Toolbox (ttbox) style over a coordinate tensor:
 * per rank column, stretch nonzeros by the matching entries of A and B
 * into `scratch`, then scatter-add into M by the mode's index. Only the
 * stretch loop is parallel; the scatter is serial (indM may repeat). */
void mttkrp_ttbox(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mode == 0 ? mats[1] : mats[0];
  matrix_t const * const B = mode == 2 ? mats[1] : mats[2];

  idx_t const I = tt->dims[mode];
  idx_t const rank = M->J;

  memset(M->vals, 0, I * rank * sizeof(val_t));

  idx_t const nnz = tt->nnz;
  idx_t const * const restrict indM = tt->ind[mode];
  idx_t const * const restrict indA = mode == 0 ? tt->ind[1] : tt->ind[0];
  idx_t const * const restrict indB = mode == 2 ? tt->ind[1] : tt->ind[2];
  val_t const * const restrict vals = tt->vals;

  for(idx_t r=0; r < rank; ++r) {
    val_t * const restrict mv =  M->vals + (r * I);
    val_t const * const restrict av = A->vals + (r * A->I);
    val_t const * const restrict bv = B->vals + (r * B->I);

    /* stretch out columns of A and B */
    #pragma omp parallel for
    for(idx_t x=0; x < nnz; ++x) {
      scratch[x] = vals[x] * av[indA[x]] * bv[indB[x]];
    }

    /* now accumulate into m1 */
    for(idx_t x=0; x < nnz; ++x) {
      mv[indM[x]] += scratch[x];
    }
  }
}


/*
 * Non omp version for streaming mode.
 * NOTE(review): despite the name, the zeroing loop below is still an
 * OpenMP worksharing loop; only the nnz stream itself is serial.
 * Streams through nonzeros of a coordinate tensor, forming the Hadamard
 * product of all non-`mode` factor rows in `accum` and scattering into
 * the output row under the per-row mutex pool.
 */
void mttkrp_stream_(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode)
{
  if(pool == NULL) {
    pool = mutex_alloc();
  }
  matrix_t * const M = mats[MAX_NMODES];
  idx_t const I = tt->dims[mode];
  idx_t const nfactors = M->J;

  val_t * const outmat = M->vals;
  #pragma omp parallel for schedule(static)
  for(idx_t x=0; x < I * nfactors; ++x) {
    outmat[x] = 0.;
  }

  idx_t const nmodes = tt->nmodes;

  val_t * mvals[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[m]->vals;
  }

  val_t const * const restrict vals = tt->vals;

  val_t * restrict accum = splatt_malloc(nfactors * sizeof(*accum));

  /* stream through nnz */
  for(idx_t n=0; n < tt->nnz; ++n) {
    /* initialize with value */
    for(idx_t f=0; f < nfactors; ++f) {
      accum[f] = vals[n];
    }

    for(idx_t m=0; m < nmodes; ++m) {
      if(m == mode) {
        continue;
      }
#if 0
      if(tt->ind[m][n] >= mats[m]->I) {
        printf("%lu >= %lu\n", tt->ind[m][n], mats[m]->I);
      }
#endif
      assert(tt->ind[m][n] < mats[m]->I);
      val_t const * const restrict inrow = mvals[m] + \
          (tt->ind[m][n] * nfactors);
      for(idx_t f=0; f < nfactors; ++f) {
        accum[f] *= inrow[f];
      }
    }

    /* write to output */
    idx_t const out_ind = tt->ind[mode][n];
    // val_t * const outrow = outmat + (tt->ind[mode][n] * nfactors);
    val_t * const restrict outrow = outmat + (tt->ind[mode][n] * nfactors);
    mutex_set_lock(pool, out_ind);
    for(idx_t f=0; f < nfactors; ++f) {
      outrow[f] += accum[f];
    }
    mutex_unset_lock(pool, out_ind);
  }

  splatt_free(accum);
}


/* Lock-free streaming MTTKRP variant: each thread accumulates into its
 * own slot of a per-thread `outrow_tmp` buffer and thread 0 reduces.
 * NOTE(review): the "Implicit barrier required" comment below is suspect
 * — there is no barrier inside the body of an `omp for` iteration, so
 * the tid==0 reduction can read other threads' slots before they are
 * written; also each thread zeroes ALL slots of its private buffer every
 * iteration. Verify correctness against the locked mttkrp_stream before
 * relying on this path. The unsynchronized writes to `num_threads` from
 * every thread store the same value, but are still a benign data race. */
void
      for(idx_t _tid = 0; _tid < num_threads; _tid++) {
        _accum += outmat_temp[(_tid * I + out_ind) * nfactors + i];
      }
      outrow[i] = _accum;
    }
  }
*/
    splatt_free(accum);
  } /* end omp parallel */

  splatt_free(outmat_temp);
}


/* Streaming MTTKRP with per-output-row locking: parallel over nonzeros;
 * each thread builds the Hadamard product of the non-`mode` factor rows
 * in its private `accum` and adds it into the output row while holding
 * the row's mutex from the global pool. This is the safe general path
 * for arbitrary output rows. */
void mttkrp_stream(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode)
{
  if(pool == NULL) {
    pool = mutex_alloc();
  }

  matrix_t * const M = mats[MAX_NMODES];
  // printf("\n\n\n Printing input mat \n\n");
  // print_matrix_(M);
  idx_t const I = tt->dims[mode];
  idx_t const nfactors = M->J;

  val_t * const outmat = M->vals;

  /*
  sp_timer_t total_runtime;
  sp_timer_t init_output;
  sp_timer_t write_output;
  timer_fstart(&total_runtime);
  timer_fstart(&init_output);
  */
  // printf("mttkrp_stream,%d,%d\n", mode, I);

  /* clear output */
  #pragma omp parallel for schedule(static)
  for(idx_t x=0; x < I * nfactors; ++x) {
    outmat[x] = 0.;
  }

  idx_t const nmodes = tt->nmodes;

  val_t * mvals[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[m]->vals;
  }

  val_t const * const restrict vals = tt->vals;

  /*
  timer_stop(&init_output);
  timer_fstart(&write_output);
  */

  #pragma omp parallel
  {
    val_t * restrict accum = splatt_malloc(nfactors * sizeof(*accum));

    /* stream through nnz */
    #pragma omp for schedule(static)
    // #pragma omp for
    for(idx_t n=0; n < tt->nnz; ++n) {
      /* initialize with value */
      for(idx_t f=0; f < nfactors; ++f) {
        accum[f] = vals[n];
      }

      for(idx_t m=0; m < nmodes; ++m) {
        if(m == mode) {
          continue;
        }
#if 0
        if(tt->ind[m][n] >= mats[m]->I) {
          printf("%lu >= %lu\n", tt->ind[m][n], mats[m]->I);
        }
#endif
        assert(tt->ind[m][n] < mats[m]->I);
        val_t const * const restrict inrow = mvals[m] + \
            (tt->ind[m][n] * nfactors);
        for(idx_t f=0; f < nfactors; ++f) {
          accum[f] *= inrow[f];
        }
      }

      /* write to output */
      idx_t const out_ind = tt->ind[mode][n];
      // val_t * const outrow = outmat + (tt->ind[mode][n] * nfactors);
      val_t * const restrict outrow = outmat + (tt->ind[mode][n] * nfactors);
      mutex_set_lock(pool, out_ind);
      for(idx_t f=0; f < nfactors; ++f) {
        outrow[f] += accum[f];
      }
      mutex_unset_lock(pool, out_ind);
    }

    splatt_free(accum);
  } /* end omp parallel */

  // printf("\n\n Printing outmat \n\n");
  // print_matrix_(M);

  /*
  timer_stop(&write_output);
  timer_stop(&total_runtime);
  printf(
      "%d, %d, %f, %f, %f\n",
      mode, I,
      init_output.seconds * 1e9,
      write_output.seconds * 1e9,
      total_runtime.seconds * 1e9
  );
  */
}


/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

/* Public API wrapper: wraps the caller's raw factor/output arrays in
 * matrix_t structures, allocates thread scratch and an MTTKRP workspace,
 * runs mttkrp_csf(), then frees everything it allocated. Returns
 * SPLATT_SUCCESS. */
int splatt_mttkrp(
    splatt_idx_t const mode,
    splatt_idx_t const ncolumns,
    splatt_csf const * const tensors,
    splatt_val_t ** matrices,
    splatt_val_t * const matout,
    double const * const options)
{
  idx_t const nmodes = tensors->nmodes;

  /* fill matrix pointers */
  matrix_t * mats[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    mats[m] = (matrix_t *) splatt_malloc(sizeof(matrix_t));
    mats[m]->I = tensors->dims[m];
    mats[m]->J = ncolumns,             /* NOTE(review): comma operator — works, but `;` was likely intended */
    mats[m]->rowmajor = 1;
    mats[m]->vals = matrices[m];
  }
  mats[MAX_NMODES] = (matrix_t *) splatt_malloc(sizeof(matrix_t));
  mats[MAX_NMODES]->I = tensors->dims[mode];
  mats[MAX_NMODES]->J = ncolumns;
  mats[MAX_NMODES]->rowmajor = 1;
  mats[MAX_NMODES]->vals = matout;

  /* Setup thread structures. + 64 bytes is to avoid false sharing. */
  idx_t const nthreads = (idx_t) options[SPLATT_OPTION_NTHREADS];
  splatt_omp_set_num_threads(nthreads);
  thd_info * thds =  thd_init(nthreads, 3,
    (nmodes * ncolumns * sizeof(val_t)) + 64,
    0,
    (nmodes * ncolumns * sizeof(val_t)) + 64);

  splatt_mttkrp_ws * ws = splatt_mttkrp_alloc_ws(tensors, ncolumns, options);

  /* XXX should be API param?
  */
  splatt_global_opts * gopts = splatt_alloc_global_opts();

  /* do the MTTKRP */
  mttkrp_csf(tensors, mats, mode, thds, ws, gopts);

  splatt_free_global_opts(gopts);
  splatt_mttkrp_free_ws(ws);

  /* cleanup */
  thd_free(thds, nthreads);
  for(idx_t m=0; m < nmodes; ++m) {
    free(mats[m]);
  }
  free(mats[MAX_NMODES]);

  return SPLATT_SUCCESS;
}


/* Allocate and populate an MTTKRP workspace: maps each mode to a CSF
 * representation according to SPLATT_OPTION_CSF_ALLOC, builds 1D tile
 * or tree partitions per CSF, and allocates per-thread privatization
 * buffers sized for the largest privatized mode. */
splatt_mttkrp_ws * splatt_mttkrp_alloc_ws(
    splatt_csf const * const tensors,
    splatt_idx_t const ncolumns,
    double const * const opts)
{
  splatt_mttkrp_ws * ws = splatt_malloc(sizeof(*ws));

  idx_t num_csf = 0;
#ifdef _OPENMP
  idx_t const num_threads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
#else
  idx_t const num_threads = 1;
#endif

  ws->num_threads = num_threads;

  /* map each MTTKRP mode to a CSF tensor */
  splatt_csf_type which_csf = (splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC];
  for(idx_t m=0; m < tensors->nmodes; ++m) {
    switch(which_csf) {
    case SPLATT_CSF_ONEMODE:
      /* only one tensor, map is easy */
      ws->mode_csf_map[m] = 0;
      num_csf = 1;
      break;

    case SPLATT_CSF_TWOMODE:
      /* last mode is mapped to second tensor */
      ws->mode_csf_map[m] = 0;
      if(csf_mode_to_depth(&(tensors[0]), m) == tensors->nmodes-1) {
        ws->mode_csf_map[m] = 1;
      }
      num_csf = 2;
      break;

    case SPLATT_CSF_ALLMODE:
      /* each mode has its own tensor, map is easy */
      ws->mode_csf_map[m] = m;
      num_csf = tensors->nmodes;
      break;

    /* XXX */
    default:
      fprintf(stderr, "SPLATT: CSF type '%d' not recognized.\n", which_csf);
      abort();
      break;
    }
  }

  assert(num_csf > 0);
  ws->num_csf = num_csf;

  /* Now setup partition info for each CSF.
  */
  for(idx_t c=0; c < num_csf; ++c) {
    ws->tile_partition[c] = NULL;
    ws->tree_partition[c] = NULL;
  }
  for(idx_t c=0; c < num_csf; ++c) {
    splatt_csf const * const csf = &(tensors[c]);
    if(tensors[c].ntiles > 1) {
      ws->tile_partition[c] = csf_partition_tiles_1d(csf, num_threads);
    }
    else {
      ws->tree_partition[c] = csf_partition_1d(csf, 0, num_threads);
    }
  }

  /* allocate privatization buffer */
  idx_t largest_priv_dim = 0;
  ws->privatize_buffer = splatt_malloc(num_threads * sizeof(*(ws->privatize_buffer)));
  for(idx_t m=0; m < tensors->nmodes; ++m) {
    ws->is_privatized[m] = p_is_privatized(tensors, m, opts);

    if(ws->is_privatized[m]) {
      largest_priv_dim = SS_MAX(largest_priv_dim, tensors->dims[m]);
      if((int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
        printf("PRIVATIZING-MODE: %"SPLATT_PF_IDX"\n", m+1);
      }
    }
  }
  for(idx_t t=0; t < num_threads; ++t) {
    ws->privatize_buffer[t] = splatt_malloc(largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer)));
  }
  if(largest_priv_dim > 0 && (int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
    size_t bytes = num_threads * largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer));
    char * bstr = bytes_str(bytes);
    printf("PRIVATIZATION-BUF: %s\n", bstr);
    printf("\n");
    free(bstr);
  }

  return ws;
}


/* Free a workspace allocated by splatt_mttkrp_alloc_ws: the per-thread
 * privatization buffers, the per-CSF partitions, and the ws itself. */
void splatt_mttkrp_free_ws(
    splatt_mttkrp_ws * const ws)
{
  for(idx_t t=0; t < ws->num_threads; ++t) {
    splatt_free(ws->privatize_buffer[t]);
  }
  splatt_free(ws->privatize_buffer);
  for(idx_t c=0; c < ws->num_csf; ++c) {
    splatt_free(ws->tile_partition[c]);
    splatt_free(ws->tree_partition[c]);
  }
  splatt_free(ws);
}
serial_tree_learner.h
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_

#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/feature.h>

#include "feature_histogram.hpp"
#include "data_partition.hpp"
#include "split_info.hpp"
#include "leaf_splits.hpp"

#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>

namespace LightGBM {

/*!
* \brief Used for learning a tree by single machine.
*        Grows the tree leaf-wise: it repeatedly finds the best split over all
*        current leaves (see FindBestThresholds / FindBestSplitsForLeaves) and
*        splits the chosen leaf (see Split).
*/
class SerialTreeLearner: public TreeLearner {
public:
  explicit SerialTreeLearner(const TreeConfig& tree_config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data) override;

  /*! \brief Train one tree from per-data-point gradients and hessians. */
  Tree* Train(const score_t* gradients, const score_t *hessians) override;

  /*! \brief Restrict training to a bagged subset of the data indices. */
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }

  /*!
  * \brief Add the leaf output of the last trained tree to out_score for every
  *        data point, using the leaf membership stored in data_partition_.
  *        Parallelized over leaves; leaves are disjoint so writes don't race.
  */
  void AddPredictionToScore(score_t* out_score) const override {
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < data_partition_->num_leaves(); ++i) {
      score_t output = static_cast<score_t>(last_trained_tree_->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

protected:
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();

  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(int left_leaf, int right_leaf);

  /*!
  * \brief Find best thresholds for all features, using multi-threading.
  *  The result will be stored in smaller_leaf_splits_ and larger_leaf_splits_.
  *  This function will be called in FindBestSplit.
  */
  virtual void FindBestThresholds();

  /*!
  * \brief Find best features for leaves from smaller_leaf_splits_ and larger_leaf_splits_.
  *  This function will be called after FindBestThresholds.
  */
  inline virtual void FindBestSplitsForLeaves();

  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);

  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*!
  * \brief Find best features for leaf from leaf_splits
  * \param leaf_splits
  */
  inline void FindBestSplitForLeaf(LeafSplits* leaf_splits);

  /*! \brief Last trained decision tree */
  const Tree* last_trained_tree_;
  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used for generate used features */
  Random random_;
  /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
  std::vector<bool> is_feature_used_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
  /*! \brief Pointer to ordered_gradients_, use this to avoid copy at BeforeTrain */
  const score_t* ptr_to_ordered_gradients_smaller_leaf_;
  /*! \brief Pointer to ordered_hessians_, use this to avoid copy at BeforeTrain*/
  const score_t* ptr_to_ordered_hessians_smaller_leaf_;
  /*! \brief Pointer to ordered_gradients_, use this to avoid copy at BeforeTrain */
  const score_t* ptr_to_ordered_gradients_larger_leaf_;
  /*! \brief Pointer to ordered_hessians_, use this to avoid copy at BeforeTrain*/
  const score_t* ptr_to_ordered_hessians_larger_leaf_;
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const TreeConfig& tree_config_;
};

/* Evaluate the best split for both active leaves of the current level. */
inline void SerialTreeLearner::FindBestSplitsForLeaves() {
  FindBestSplitForLeaf(smaller_leaf_splits_.get());
  FindBestSplitForLeaf(larger_leaf_splits_.get());
}

/* A negative leaf index means "no leaf"; report zero data in that case. */
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leafIdx) const {
  if (leafIdx >= 0) {
    return data_partition_->leaf_count(leafIdx);
  } else {
    return 0;
  }
}

/* Pick the feature with maximal gain among the per-feature best splits and
 * record it in best_split_per_leaf_ for this leaf. No-op if leaf_splits is
 * null or refers to no leaf. */
inline void SerialTreeLearner::FindBestSplitForLeaf(LeafSplits* leaf_splits) {
  if (leaf_splits == nullptr || leaf_splits->LeafIndex() < 0) { return; }
  std::vector<float> gains;
  for (size_t i = 0; i < leaf_splits->BestSplitPerFeature().size(); ++i) {
    gains.push_back(leaf_splits->BestSplitPerFeature()[i].gain);
  }
  int best_feature = static_cast<int>(ArrayArgs<float>::ArgMax(gains));
  int leaf = leaf_splits->LeafIndex();
  best_split_per_leaf_[leaf] = leaf_splits->BestSplitPerFeature()[best_feature];
  best_split_per_leaf_[leaf].feature = best_feature;
}

}  // namespace LightGBM
#endif   // LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
rawmd5u_fmt_plug.c
/* * Thick raw-md5-unicode (come-back :) * * This software is Copyright (c) 2011 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rawmd5uthick; #elif FMT_REGISTERS_H john_register_one(&fmt_rawmd5uthick); #else #include <string.h> #include "arch.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5) #endif #include "simd-intrinsics.h" #include "md5.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "unicode.h" #include "memory.h" #include "johnswap.h" #include "memdbg.h" #define FORMAT_LABEL "Raw-MD5u" #define FORMAT_NAME "" #define ALGORITHM_NAME "md5(utf16($p)) " MD5_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define CIPHERTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_SIZE 0 #define SALT_ALIGN 1 #ifdef SIMD_COEF_32 #define BLOCK_LOOPS 1 #define PLAINTEXT_LENGTH 27 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS * BLOCK_LOOPS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 ) #else #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #ifdef SIMD_COEF_32 static unsigned char (*saved_key); static unsigned char (*crypt_key); static unsigned int (**buf_ptr); #else static MD5_CTX ctx; static int saved_len; static UTF16 saved_key[PLAINTEXT_LENGTH + 1]; static uint32_t crypt_key[BINARY_SIZE / 4]; #endif /* Note some plaintexts will be replaced in init() if running UTF-8 */ static struct fmt_tests tests[] = { {"16c47151c18ac087cd12b3a70746c790", "test1"}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, 
{"d41d8cd98f00b204e9800998ecf8427e", ""}, {"9c3abef89ff76f8acd80eae37b35f64f", "test2"}, {"849ee1b88b5d887bdb058180a666b450", "test3"}, {"8c4cb7e8b33b56a833cdaa8673f3b425", "test4"}, {"537e738b1ac5551f65106368dc301ece", "thatsworking"}, // repeat first hash in exactly the same form that is used in john.pot {"$dynamic_29$16c47151c18ac087cd12b3a70746c790", "test1"}, {NULL} }; static void set_key_utf8(char *_key, int index); static void set_key_CP(char *_key, int index); static void init(struct fmt_main *self) { #if SIMD_COEF_32 int i; #endif if (options.target_enc == UTF_8) { /* This avoids an if clause for every set_key */ self->methods.set_key = set_key_utf8; #if SIMD_COEF_32 /* kick it up from 27. We will truncate in setkey_utf8() */ self->params.plaintext_length = 3 * PLAINTEXT_LENGTH; #endif tests[1].ciphertext = "94a4e171de16580742c4d141e6607bf7"; tests[1].plaintext = "\xE2\x82\xAC"; // Euro sign tests[2].ciphertext = "03c60810f0e54d16e826aca385d776c8"; tests[2].plaintext = "\xE2\x82\xAC\xE2\x82\xAC"; // 2 x euro tests[3].ciphertext = "2d554433d7cde7ec8d16aaf126c3be6b"; tests[3].plaintext = "\xE2\x82\xAC\xC3\xBC"; // euro and u-umlaut tests[4].ciphertext = "8007d9070b27db7b30433df2cd10abc1"; tests[4].plaintext = "\xC3\xBC\xE2\x82\xAC"; // u-umlaut and euro } else { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) { /* This avoids an if clause for every set_key */ self->methods.set_key = set_key_CP; } if (CP_to_Unicode[0xfc] == 0x00fc) { tests[1].ciphertext = "ea7ab2b5c07650badab30790d0c9b63e"; tests[1].plaintext = "\xFC"; // German u-umlaut in iso-8859-1 tests[2].ciphertext = "f0a0b9f1dea0e458cec9a284ff434d44"; tests[2].plaintext = "\xFC\xFC"; tests[3].ciphertext = "d25a0b436b768777cc9a343d283dbf5a"; tests[3].plaintext = "\xFC\xFC\xFC"; tests[4].ciphertext = "719917322bf12168f8c55939e4fec8de"; tests[4].plaintext = "\xFC\xFC\xFC\xFC"; } } #if SIMD_COEF_32 saved_key = mem_calloc_align(sizeof(*saved_key), 64*self->params.max_keys_per_crypt, 
MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(sizeof(*crypt_key), BINARY_SIZE*self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); buf_ptr = mem_calloc_align(sizeof(*buf_ptr), self->params.max_keys_per_crypt, sizeof(*buf_ptr)); for (i=0; i<self->params.max_keys_per_crypt; i++) buf_ptr[i] = (unsigned int*)&saved_key[GETPOS(0, i)]; #endif } static void done(void) { #ifdef SIMD_COEF_32 MEM_FREE(buf_ptr); MEM_FREE(crypt_key); MEM_FREE(saved_key); #endif } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[32+12+1]; if (!strncmp(ciphertext, "$dynamic_29$", 12)) ciphertext += 12; strcpy(out, "$dynamic_29$"); memcpy(&out[12], ciphertext, 32); out[sizeof(out)-1] = 0; strlwr(&out[12]); return out; } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; if (!strncmp(ciphertext, "$dynamic_29$", 12)) ciphertext += 12; for (pos = ciphertext; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++); if (!*pos && pos - ciphertext == CIPHERTEXT_LENGTH) return 1; else return 0; } static void *get_binary(char *ciphertext) { static union { unsigned long dummy; unsigned int i[BINARY_SIZE/sizeof(unsigned int)]; } _out; unsigned int *out = _out.i; unsigned int i; unsigned int temp; ciphertext+=12; for (i=0; i<4; i++) { temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])])); temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24; #if ARCH_LITTLE_ENDIAN out[i]=temp; #else out[i]=JOHNSWAP(temp); #endif } return out; } // ISO-8859-1 to UCS-2, directly into vector key buffer static void set_key(char *_key, int index) { #ifdef 
SIMD_COEF_32 const unsigned char *key = (unsigned char*)_key; unsigned int *keybuf_word = buf_ptr[index]; unsigned int len, temp2; len = 0; while((temp2 = *key++)) { unsigned int temp; if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1) { temp2 |= (temp << 16); *keybuf_word = temp2; } else { temp2 |= (0x80 << 16); *keybuf_word = temp2; len++; goto key_cleaning; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else #if ARCH_LITTLE_ENDIAN UTF8 *s = (UTF8*)_key; UTF16 *d = saved_key; while (*s) *d++ = *s++; *d = 0; saved_len = (int)((char*)d - (char*)saved_key); #else UTF8 *s = (UTF8*)_key; UTF8 *d = (UTF8*)saved_key; while (*s) { *d++ = *s++; ++d; } *d = 0; saved_len = (int)((char*)d - (char*)saved_key); #endif #endif } // Legacy codepage to UCS-2, directly into vector key buffer static void set_key_CP(char *_key, int index) { #ifdef SIMD_COEF_32 const unsigned char *key = (unsigned char*)_key; unsigned int *keybuf_word = buf_ptr[index]; unsigned int len, temp2; len = 0; while((temp2 = *key++)) { unsigned int temp; temp2 = CP_to_Unicode[temp2]; if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1) { temp = CP_to_Unicode[temp]; temp2 |= (temp << 16); *keybuf_word = temp2; } else { temp2 |= (0x80 << 16); *keybuf_word = temp2; len++; goto key_cleaning_enc; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning_enc: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else saved_len = enc_to_utf16((UTF16*)&saved_key, PLAINTEXT_LENGTH + 1, (unsigned char*)_key, strlen(_key)) << 1; if (saved_len < 0) saved_len = 
strlen16(saved_key); #endif } // UTF-8 to UCS-2, directly into vector key buffer static void set_key_utf8(char *_key, int index) { #ifdef SIMD_COEF_32 const UTF8 *source = (UTF8*)_key; unsigned int *keybuf_word = buf_ptr[index]; UTF32 chl, chh = 0x80; unsigned int len = 0; while (*source) { chl = *source; if (chl >= 0xC0) { unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f]; switch (extraBytesToRead) { case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 2: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 1: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 0: break; default: goto bailout; } chl -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; if (chl > UNI_MAX_BMP) { if (len == PLAINTEXT_LENGTH) { chh = 0x80; *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; break; } #define halfBase 0x0010000UL #define halfShift 10 #define halfMask 0x3FFUL #define UNI_SUR_HIGH_START (UTF32)0xD800 #define UNI_SUR_LOW_START (UTF32)0xDC00 chl -= halfBase; chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);; chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START); len++; } else if (*source && len < PLAINTEXT_LENGTH) { chh = *source; if (chh >= 0xC0) { unsigned int extraBytesToRead = opt_trailingBytesUTF8[chh & 0x3f]; switch (extraBytesToRead) { case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 2: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 1: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 0: break; default: goto bailout; } chh -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; } else { chh = 0x80; *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; break; } *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; } if (chh != 0x80 || len == 0) { *keybuf_word = 0x80; keybuf_word += SIMD_COEF_32; } bailout: while(*keybuf_word) { 
*keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else saved_len = utf8_to_utf16((UTF16*)&saved_key, PLAINTEXT_LENGTH + 1, (unsigned char*)_key, strlen(_key)) << 1; if (saved_len < 0) saved_len = strlen16(saved_key); #endif } static char *get_key(int index) { #ifdef SIMD_COEF_32 // Get the key back from the key buffer, from UCS-2 unsigned int *keybuffer = (unsigned int*)&saved_key[GETPOS(0, index)]; static UTF16 key[PLAINTEXT_LENGTH + 1 + 1]; // if only +1 we 'can' overflow. Not sure why, but ASan found it. unsigned int md5_size=0; unsigned int i=0; for (; md5_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md5_size++) { key[md5_size] = keybuffer[i]; key[md5_size+1] = keybuffer[i] >> 16; if (key[md5_size] == 0x80 && key[md5_size+1] == 0) { key[md5_size] = 0; break; } ++md5_size; if (key[md5_size] == 0x80 && ((keybuffer[i+SIMD_COEF_32]&0xFFFF) == 0 || md5_size == PLAINTEXT_LENGTH)) { key[md5_size] = 0; break; } } return (char*)utf16_to_enc(key); #else return (char*)utf16_to_enc(saved_key); #endif } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x,y=0; for (;y<SIMD_PARA_MD5*BLOCK_LOOPS;y++) for (x=0;x<SIMD_COEF_32;x++) { if ( ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] ) return 1; } return 0; #else return !memcmp(binary, crypt_key, BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return (1); } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; if ( ((uint32_t*)binary)[0] != ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] ) return 0; if ( ((uint32_t*)binary)[1] != ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4+SIMD_COEF_32] ) return 0; if ( ((uint32_t*)binary)[2] != ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4+2*SIMD_COEF_32] ) return 0; if ( ((uint32_t*)binary)[3] != 
((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4+3*SIMD_COEF_32] ) return 0; return 1; #else return !memcmp(binary, crypt_key, BINARY_SIZE); #endif } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #if defined(SIMD_COEF_32) #if (BLOCK_LOOPS > 1) int i; // This was an experiment. It's not used (unless you bump BLOCK_LOOPS), // cause it does not scale well. We would need to parallelize set_key() #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < BLOCK_LOOPS; i++) SIMDmd5body(&saved_key[i*NBKEYS*64], (unsigned int*)&crypt_key[i*NBKEYS*BINARY_SIZE], NULL, SSEi_MIXED_IN); #else SIMDmd5body(saved_key, (unsigned int*)crypt_key, NULL, SSEi_MIXED_IN); #endif #else MD5_Init( &ctx ); MD5_Update(&ctx, (unsigned char*)saved_key, saved_len); MD5_Final((unsigned char*) crypt_key, &ctx); #endif return count; } #ifdef SIMD_COEF_32 static int get_hash_0(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_0; } static int get_hash_1(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_1; } static int get_hash_2(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_2; } static int get_hash_3(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_3; } static int get_hash_4(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_4; } static int get_hash_5(int index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_5; } static int get_hash_6(int 
index) { unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_6; } #else static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_0; } static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_1; } static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_2; } static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_3; } static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_4; } static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_5; } static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_6; } #endif struct fmt_main fmt_rawmd5uthick = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if (BLOCK_LOOPS > 1) && defined(SSE_MD5_PARA) FMT_OMP | #endif FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE, { NULL }, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_unop__sin_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__sin_fc64_fc64)
// op(A') function:  GB (_unop_tran__sin_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = csin (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = csin (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GxB_FC64_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */     \
    GxB_FC64_t z = aij ;               \
    Cx [pC] = csin (z) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIN || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies complex sine elementwise over anz entries, parallelized with
// nthreads OpenMP threads. Cx and Ax may be aliased (the operation is
// elementwise in-place safe).
GrB_Info GB (_unop_apply__sin_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csin (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csin (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all generated unary ops and is
// textually included; it uses the GB_* macros defined above.
GrB_Info GB (_unop_tran__sin_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
syr2k.c
/** * syr2k.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #include "../../common/polybenchUtilFuncts.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.10 /* Problem size */ #define N 1024 #define M 1024 #define GPU 1 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 12435 #define BETA 4546 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] = ((DATA_TYPE)i * j + 2) / N; } for (j = 0; j < M; j++) { A[i * N + j] = ((DATA_TYPE)i * j) / N; B[i * N + j] = ((DATA_TYPE)i * j + 1) / N; } } } void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] *= BETA; } } for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < M; k++) { C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k]; C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k]; } } } } void syr2k_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] *= BETA; } } #pragma omp target device(GPU) map(to : A[:N * M], B[:N * M]) \ map(tofrom : C[:N * N]) #pragma omp parallel for collapse(2) for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < M; k++) { C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k]; C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + 
k]; } } } } void compareResults(DATA_TYPE *C, DATA_TYPE *C_Gpu) { int i, j, fail; fail = 0; // Compare C with D for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { if (percentDiff(C[i * N + j], C_Gpu[i * N + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main() { double t_start, t_end; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *C; DATA_TYPE *C_Gpu; A = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE)); C = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE)); C_Gpu = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE)); fprintf(stdout, "<< Symmetric rank-2k operations >>\n"); init_arrays(A, B, C_Gpu); t_start = rtclock(); syr2k_OMP(A, B, C_Gpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); init_arrays(A, B, C); t_start = rtclock(); syr2k(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(C, C_Gpu); free(A); free(B); free(C); free(C_Gpu); return 0; }
lis_precision_matvec.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "lis_config.h" #else #ifdef HAVE_CONFIG_WIN32_H #include "lis_config_win32.h" #endif #endif #include <stdio.h> #include <stdlib.h> #ifdef HAVE_MALLOC_H #include <malloc.h> #endif #include <string.h> #include <math.h> #ifdef USE_SSE2 #include <emmintrin.h> #endif #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MPI #include <mpi.h> #endif #include "lislib.h" #ifdef USE_QUAD_PRECISION void lis_matvec_crs_mp(LIS_MATRIX A, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,n; LIS_INT is,ie,jj,j0; LIS_INT *jj0; LIS_SCALAR *vv0; LIS_SCALAR *x,*y,*xl,*yl; LIS_QUAD_DECLAR; n = A->n; x = X->value; y = Y->value; xl = X->value_lo; yl = Y->value_lo; if( A->is_splited ) { #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,is,ie,j0,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel for private(i,j,is,ie,j0,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<n;i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif is = A->L->ptr[i]; ie = A->L->ptr[i+1]; for(j=is;j<ie-0;j+=1) { j0 = A->L->index[j+0]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->L->value[j]); #else LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->L->value[j]); #endif } is = A->U->ptr[i]; ie = A->U->ptr[i+1]; for(j=is;j<ie-0;j+=1) { j0 = A->U->index[j+0]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->U->value[j]); #else LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->U->value[j]); #endif } } } else { jj0 = A->index; vv0 = A->value; #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,is,ie,j0,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel for private(i,j,is,ie,j0,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<n;i++) { y[i] = yl[i] = 0.0; is = A->ptr[i]; ie = A->ptr[i+1]; for(j=is;j<ie-0;j+=1) { j0 = jj0[j+0]; 
#ifndef USE_SSE2 LIS_QUAD_FMAD(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],vv0[j]); #else LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],vv0[j]); #endif } } } } void lis_matvec_crs_mp2(LIS_MATRIX A, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,n; LIS_INT is,ie; LIS_INT j0,j1; LIS_INT *jj0; LIS_SCALAR *vv0; LIS_SCALAR *x,*y,*xl,*yl; LIS_QUAD_PD tt; LIS_QUAD_DECLAR; n = A->n; x = X->value; y = Y->value; xl = X->value_lo; yl = Y->value_lo; if( A->is_splited ) { #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,is,ie,j0,j1,tt,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel for private(i,j,is,ie,j0,j1,tt,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<n;i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif tt.hi[0] = tt.hi[1] = tt.lo[0] = tt.lo[1] = 0.0; is = A->L->ptr[i]; ie = A->L->ptr[i+1]; for(j=is;j<ie-1;j+=2) { j0 = A->L->index[j+0]; j1 = A->L->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_LDSD(tt.hi[0],tt.lo[0],tt.hi[0],tt.lo[0],x[j0],xl[j0],x[j1],xl[j1],A->L->value[j]); #endif } for(;j<ie;j++) { j0 = A->L->index[j+0]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->L->value[j]); #endif } is = A->U->ptr[i]; ie = A->U->ptr[i+1]; for(j=is;j<ie-1;j+=2) { j0 = A->U->index[j+0]; j1 = A->U->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_LDSD(tt.hi[0],tt.lo[0],tt.hi[0],tt.lo[0],x[j0],xl[j0],x[j1],xl[j1],A->U->value[j]); #endif } for(;j<ie;j++) { j0 = A->U->index[j+0]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->U->value[j]); #endif } #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],tt.hi[0],tt.lo[0]); LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],tt.hi[1],tt.lo[1]); #endif } } else { jj0 = A->index; vv0 = A->value; #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,is,ie,j0,j1,tt,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp 
parallel for private(i,j,is,ie,j0,j1,tt,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<n;i++) { tt.hi[0] = tt.hi[1] = tt.lo[0] = tt.lo[1] = 0.0; is = A->ptr[i]; ie = A->ptr[i+1]; for(j=is;j<ie-1;j+=2) { j0 = jj0[j+0]; j1 = jj0[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_LDSD(tt.hi[0],tt.lo[0],tt.hi[0],tt.lo[0],x[j0],xl[j0],x[j1],xl[j1],vv0[j]); #endif } #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(y[i],yl[i],tt.hi[0],tt.lo[0],tt.hi[1],tt.lo[1]); #endif for(;j<ie;j++) { j0 = jj0[j+0]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],vv0[j]); #endif } } } } void lis_matvect_crs_mp(LIS_MATRIX A, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,js,je,jj; LIS_INT n,np; LIS_QUAD_PTR tt0; LIS_SCALAR *x,*y,*xl,*yl; #ifdef _OPENMP LIS_INT k,nprocs; LIS_SCALAR *ww,*wwl; #endif LIS_QUAD_DECLAR; n = A->n; np = A->np; x = X->value; y = Y->value; xl = X->value_lo; yl = Y->value_lo; tt0.hi = &X->work[0]; tt0.lo = &X->work[1]; if( A->is_splited ) { #ifdef _OPENMP nprocs = omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_crs_mp::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel private(i,j,js,je,jj,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel private(i,j,js,je,jj,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0; i<n; i++) { js = A->L->ptr[i]; je = A->L->ptr[i+1]; for(j=js;j<je;j++) { jj = k*np+A->L->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->L->value[j]); #else LIS_QUAD_FMAD_SSE2(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->L->value[j]); #endif } js = A->U->ptr[i]; je = A->U->ptr[i+1]; for(j=js;j<je;j++) { jj = k*np+A->U->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->U->value[j]); #else 
LIS_QUAD_FMAD_SSE2(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->U->value[j]); #endif } } #pragma omp for for(i=0;i<np;i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif for(j=0;j<nprocs;j++) { #ifndef USE_SSE2 LIS_QUAD_ADD(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #else LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else for(i=0; i<np; i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif } for(i=0; i<n; i++) { js = A->L->ptr[i]; je = A->L->ptr[i+1]; for(j=js;j<je;j++) { jj = A->L->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[jj],yl[jj],y[jj],yl[jj],x[i],xl[i],A->L->value[j]); #else LIS_QUAD_FMAD_SSE2(y[jj],yl[jj],y[jj],yl[jj],x[i],xl[i],A->L->value[j]); #endif } js = A->U->ptr[i]; je = A->U->ptr[i+1]; for(j=js;j<je;j++) { jj = A->U->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[jj],yl[jj],y[jj],yl[jj],x[i],xl[i],A->U->value[j]); #else LIS_QUAD_FMAD_SSE2(y[jj],yl[jj],y[jj],yl[jj],x[i],xl[i],A->U->value[j]); #endif } } #endif } else { #ifdef _OPENMP nprocs = omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_crs_mp::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel private(i,j,js,je,jj,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel private(i,j,js,je,jj,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0; i<n; i++) { js = A->ptr[i]; je = A->ptr[i+1]; for(j=js;j<je;j++) { jj = k*np+A->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->value[j]); #else LIS_QUAD_FMAD_SSE2(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->value[j]); 
#endif } } #pragma omp for for(i=0;i<np;i++) { y[i] = yl[i] = 0.0; for(j=0;j<nprocs;j++) { #ifndef USE_SSE2 LIS_QUAD_ADD(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #else LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else for(i=0; i<np; i++) { y[i] = 0.0; yl[i] = 0.0; } for(i=0; i<n; i++) { js = A->ptr[i]; je = A->ptr[i+1]; tt0.hi[0] = x[i]; tt0.lo[0] = xl[i]; for(j=js;j<je;j++) { jj = A->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[jj],yl[jj],y[jj],yl[jj],tt0.hi[0],tt0.lo[0],A->value[j]); #else LIS_QUAD_FMAD_SSE2(y[jj],yl[jj],y[jj],yl[jj],tt0.hi[0],tt0.lo[0],A->value[j]); #endif } } #endif } } void lis_matvect_crs_mp2(LIS_MATRIX A, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,js,je,j0,j1; LIS_INT n,np; LIS_QUAD_PTR tt0; LIS_SCALAR *x,*y,*xl,*yl; #ifdef _OPENMP LIS_INT k,nprocs; LIS_SCALAR *ww,*wwl; #endif LIS_QUAD_DECLAR; n = A->n; np = A->np; x = X->value; y = Y->value; xl = X->value_lo; yl = Y->value_lo; tt0.hi = &X->work[0]; tt0.lo = &X->work[2]; if( A->is_splited ) { #ifdef _OPENMP nprocs = omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR), "lis_matvect_crs_mp2::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel private(i,j,js,je,j0,j1,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel private(i,j,js,je,j0,j1,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(i=0; i<np; i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(ww[i],wwl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(ww[i],wwl[i],x[i],xl[i],A->D->value[i]); #endif } #pragma omp for for(i=0; i<n; i++) { js = A->L->ptr[i]; je = A->L->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = k*np + A->L->index[j]; j1 = k*np + A->L->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(ww[j0],wwl[j0],ww[j1],wwl[j1],ww[j0],wwl[j0],ww[j1],wwl[j1],x[i],xl[i],x[i],xl[i],A->L->value[j]); #endif } for(;j<je;j++) { j0 = A->L->index[j]; #ifdef USE_SSE2 
LIS_QUAD_FMAD_SSE2(ww[j0],wwl[j0],ww[j0],wwl[j0],x[i],xl[i],A->L->value[j]); #endif } js = A->U->ptr[i]; je = A->U->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = k*np + A->U->index[j]; j1 = k*np + A->U->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(ww[j0],wwl[j0],ww[j1],wwl[j1],ww[j0],wwl[j0],ww[j1],wwl[j1],x[i],xl[i],x[i],xl[i],A->U->value[j]); #endif } for(;j<je;j++) { j0 = A->U->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(ww[j0],wwl[j0],ww[j0],wwl[j0],x[i],xl[i],A->U->value[j]); #endif } } #pragma omp for for(i=0;i<np;i++) { y[i] = yl[i] = 0.0; for(j=0;j<nprocs;j++) { #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else for(i=0; i<np; i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif } for(i=0; i<n; i++) { js = A->L->ptr[i]; je = A->L->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = A->L->index[j]; j1 = A->L->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(y[j0],yl[j0],y[j1],yl[j1],y[j0],yl[j0],y[j1],yl[j1],x[i],xl[i],x[i],xl[i],A->L->value[j]); #endif } for(;j<je;j++) { j0 = A->L->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[j0],yl[j0],y[j0],yl[j0],tt0.hi[0],tt0.lo[0],A->L->value[j]); #endif } js = A->U->ptr[i]; je = A->U->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = A->U->index[j]; j1 = A->U->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(y[j0],yl[j0],y[j1],yl[j1],y[j0],yl[j0],y[j1],yl[j1],x[i],xl[i],x[i],xl[i],A->U->value[j]); #endif } for(;j<je;j++) { j0 = A->U->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[j0],yl[j0],y[j0],yl[j0],tt0.hi[0],tt0.lo[0],A->U->value[j]); #endif } } #endif } else { #ifdef _OPENMP nprocs = omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR), "lis_matvect_crs_mp2::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel private(i,j,js,je,j0,j1,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel 
private(i,j,js,je,j0,j1,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0; i<n; i++) { js = A->ptr[i]; je = A->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = k*np + A->index[j]; j1 = k*np + A->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(ww[j0],wwl[j0],ww[j1],wwl[j1],ww[j0],wwl[j0],ww[j1],wwl[j1],x[i],xl[i],x[i],xl[i],A->value[j]); #endif } for(;j<je;j++) { j0 = A->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(ww[j0],wwl[j0],ww[j0],wwl[j0],x[i],xl[i],A->value[j]); #endif } } #pragma omp for for(i=0;i<np;i++) { y[i] = yl[i] = 0.0; for(j=0;j<nprocs;j++) { #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else for(i=0; i<np; i++) { y[i] = 0.0; yl[i] = 0.0; } for(i=0; i<n; i++) { js = A->ptr[i]; je = A->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = A->index[j]; j1 = A->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(y[j0],yl[j0],y[j1],yl[j1],y[j0],yl[j0],y[j1],yl[j1],x[i],xl[i],x[i],xl[i],A->value[j]); #endif } for(;j<je;j++) { j0 = A->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[j0],yl[j0],y[j0],yl[j0],x[i],xl[i],A->value[j]); #endif } } #endif } } #endif #ifdef USE_QUAD_PRECISION void lis_matvec_ccs_mp(LIS_MATRIX A, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,js,je,jj; LIS_INT n,np; LIS_QUAD_PTR tt0; LIS_SCALAR *x,*y,*xl,*yl; #ifdef _OPENMP LIS_INT k,nprocs; LIS_SCALAR *ww,*wwl; #endif LIS_QUAD_DECLAR; n = A->n; np = A->np; x = X->value; y = Y->value; xl = X->value_lo; yl = Y->value_lo; tt0.hi = &X->work[0]; tt0.lo = &X->work[1]; if( A->is_splited ) { #ifdef _OPENMP nprocs = omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_crs_mp::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel private(i,j,js,je,jj,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else 
#pragma omp parallel private(i,j,js,je,jj,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0; i<np; i++) { js = A->L->ptr[i]; je = A->L->ptr[i+1]; for(j=js;j<je;j++) { jj = k*np+A->L->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->L->value[j]); #else LIS_QUAD_FMAD_SSE2(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->L->value[j]); #endif } js = A->U->ptr[i]; je = A->U->ptr[i+1]; for(j=js;j<je;j++) { jj = k*np+A->U->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->U->value[j]); #else LIS_QUAD_FMAD_SSE2(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->U->value[j]); #endif } } #pragma omp for for(i=0;i<n;i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif for(j=0;j<nprocs;j++) { #ifndef USE_SSE2 LIS_QUAD_ADD(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #else LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else for(i=0; i<n; i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif } for(i=0; i<np; i++) { js = A->L->ptr[i]; je = A->L->ptr[i+1]; for(j=js;j<je;j++) { jj = A->L->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[jj],yl[jj],y[jj],yl[jj],x[i],xl[i],A->L->value[j]); #else LIS_QUAD_FMAD_SSE2(y[jj],yl[jj],y[jj],yl[jj],x[i],xl[i],A->L->value[j]); #endif } js = A->U->ptr[i]; je = A->U->ptr[i+1]; for(j=js;j<je;j++) { jj = A->U->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[jj],yl[jj],y[jj],yl[jj],x[i],xl[i],A->U->value[j]); #else LIS_QUAD_FMAD_SSE2(y[jj],yl[jj],y[jj],yl[jj],x[i],xl[i],A->U->value[j]); #endif } } #endif } else { #ifdef _OPENMP nprocs = omp_get_max_threads(); ww = (LIS_SCALAR 
*)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_crs_mp::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel private(i,j,js,je,jj,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel private(i,j,js,je,jj,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0; i<np; i++) { js = A->ptr[i]; je = A->ptr[i+1]; for(j=js;j<je;j++) { jj = k*np+A->L->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->L->value[j]); #else LIS_QUAD_FMAD_SSE2(ww[jj],wwl[jj],ww[jj],wwl[jj],x[i],xl[i],A->L->value[j]); #endif } } #pragma omp for for(i=0;i<np;i++) { y[i] = yl[i] = 0.0; for(j=0;j<nprocs;j++) { #ifndef USE_SSE2 LIS_QUAD_ADD(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #else LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else for(i=0; i<n; i++) { y[i] = 0.0; yl[i] = 0.0; } for(i=0; i<np; i++) { js = A->ptr[i]; je = A->ptr[i+1]; tt0.hi[0] = x[i]; tt0.lo[0] = xl[i]; for(j=js;j<je;j++) { jj = A->index[j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[jj],yl[jj],y[jj],yl[jj],tt0.hi[0],tt0.lo[0],A->value[j]); #else LIS_QUAD_FMAD_SSE2(y[jj],yl[jj],y[jj],yl[jj],tt0.hi[0],tt0.lo[0],A->value[j]); #endif } } #endif } } void lis_matvec_ccs_mp2(LIS_MATRIX A, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,js,je,j0,j1; LIS_INT n,np; LIS_QUAD_PTR tt0; LIS_SCALAR *x,*y,*xl,*yl; #ifdef _OPENMP LIS_INT k,nprocs; LIS_SCALAR *ww,*wwl; #endif LIS_QUAD_DECLAR; n = A->n; np = A->np; x = X->value; y = Y->value; xl = X->value_lo; yl = Y->value_lo; tt0.hi = &X->work[0]; tt0.lo = &X->work[2]; if( A->is_splited ) { #ifdef _OPENMP nprocs = omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR), "lis_matvect_crs_mp2::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel 
private(i,j,js,je,j0,j1,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel private(i,j,js,je,j0,j1,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0; i<np; i++) { js = A->ptr[i]; je = A->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = k*np + A->index[j]; j1 = k*np + A->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(ww[j0],wwl[j0],ww[j1],wwl[j1],ww[j0],wwl[j0],ww[j1],wwl[j1],x[i],xl[i],x[i],xl[i],A->value[j]); #endif } for(;j<je;j++) { j0 = A->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(ww[j0],wwl[j0],ww[j0],wwl[j0],x[i],xl[i],A->value[j]); #endif } } #pragma omp for for(i=0;i<n;i++) { y[i] = yl[i] = 0.0; for(j=0;j<nprocs;j++) { #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else for(i=0; i<n; i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif } for(i=0; i<np; i++) { js = A->L->ptr[i]; je = A->L->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = A->L->index[j]; j1 = A->L->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(y[j0],yl[j0],y[j1],yl[j1],y[j0],yl[j0],y[j1],yl[j1],x[i],xl[i],x[i],xl[i],A->L->value[j]); #endif } for(;j<je;j++) { j0 = A->L->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[j0],yl[j0],y[j0],yl[j0],tt0.hi[0],tt0.lo[0],A->L->value[j]); #endif } js = A->U->ptr[i]; je = A->U->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = A->U->index[j]; j1 = A->U->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(y[j0],yl[j0],y[j1],yl[j1],y[j0],yl[j0],y[j1],yl[j1],x[i],xl[i],x[i],xl[i],A->U->value[j]); #endif } for(;j<je;j++) { j0 = A->U->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[j0],yl[j0],y[j0],yl[j0],tt0.hi[0],tt0.lo[0],A->U->value[j]); #endif } } #endif } else { #ifdef _OPENMP nprocs = 
omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR), "lis_matvect_crs_mp2::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel private(i,j,js,je,j0,j1,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel private(i,j,js,je,j0,j1,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0; i<np; i++) { js = A->ptr[i]; je = A->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = k*np + A->index[j]; j1 = k*np + A->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(ww[j0],wwl[j0],ww[j1],wwl[j1],ww[j0],wwl[j0],ww[j1],wwl[j1],x[i],xl[i],x[i],xl[i],A->value[j]); #endif } for(;j<je;j++) { j0 = A->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(ww[j0],wwl[j0],ww[j0],wwl[j0],x[i],xl[i],A->value[j]); #endif } } #pragma omp for for(i=0;i<n;i++) { y[i] = yl[i] = 0.0; for(j=0;j<nprocs;j++) { #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else for(i=0; i<n; i++) { y[i] = 0.0; yl[i] = 0.0; } for(i=0; i<np; i++) { js = A->ptr[i]; je = A->ptr[i+1]; for(j=js;j<je-1;j+=2) { j0 = A->index[j]; j1 = A->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(y[j0],yl[j0],y[j1],yl[j1],y[j0],yl[j0],y[j1],yl[j1],x[i],xl[i],x[i],xl[i],A->value[j]); #endif } for(;j<je;j++) { j0 = A->index[j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[j0],yl[j0],y[j0],yl[j0],x[i],xl[i],A->value[j]); #endif } } #endif } } void lis_matvect_ccs_mp(LIS_MATRIX A, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,np; LIS_INT is,ie,j0; LIS_INT *jj0; LIS_SCALAR *vv0; LIS_SCALAR *x,*y,*xl,*yl; LIS_QUAD_DECLAR; np = A->np; x = X->value; y = Y->value; xl = X->value_lo; yl = Y->value_lo; if( A->is_splited ) { #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,is,ie,j0,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma 
omp parallel for private(i,j,is,ie,j0,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<np;i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif is = A->L->ptr[i]; ie = A->L->ptr[i+1]; for(j=is;j<ie-0;j+=1) { j0 = A->L->index[j+0]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->L->value[j]); #else LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->L->value[j]); #endif } is = A->U->ptr[i]; ie = A->U->ptr[i+1]; for(j=is;j<ie-0;j+=1) { j0 = A->U->index[j+0]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->U->value[j]); #else LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->U->value[j]); #endif } } } else { jj0 = A->index; vv0 = A->value; #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,is,ie,j0,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel for private(i,j,is,ie,j0,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<np;i++) { y[i] = yl[i] = 0.0; is = A->ptr[i]; ie = A->ptr[i+1]; for(j=is;j<ie-0;j+=1) { j0 = jj0[j+0]; #ifndef USE_SSE2 LIS_QUAD_FMAD(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],vv0[j]); #else LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],vv0[j]); #endif } } } } void lis_matvect_ccs_mp2(LIS_MATRIX A, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,np; LIS_INT is,ie; LIS_INT j0,j1; LIS_INT *jj0; LIS_SCALAR *vv0; LIS_SCALAR *x,*y,*xl,*yl; LIS_QUAD_PD tt; LIS_QUAD_DECLAR; np = A->np; x = X->value; y = Y->value; xl = X->value_lo; yl = Y->value_lo; if( A->is_splited ) { #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,is,ie,j0,j1,tt,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel for private(i,j,is,ie,j0,j1,tt,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<np;i++) { #ifndef USE_SSE2 LIS_QUAD_MULD(y[i],yl[i],x[i],xl[i],A->D->value[i]); #else 
LIS_QUAD_MULD_SSE2(y[i],yl[i],x[i],xl[i],A->D->value[i]); #endif tt.hi[0] = tt.hi[1] = tt.lo[0] = tt.lo[1] = 0.0; is = A->L->ptr[i]; ie = A->L->ptr[i+1]; for(j=is;j<ie-1;j+=2) { j0 = A->L->index[j+0]; j1 = A->L->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_LDSD(tt.hi[0],tt.lo[0],tt.hi[0],tt.lo[0],x[j0],xl[j0],x[j1],xl[j1],A->L->value[j]); #endif } for(;j<ie;j++) { j0 = A->L->index[j+0]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->L->value[j]); #endif } is = A->U->ptr[i]; ie = A->U->ptr[i+1]; for(j=is;j<ie-1;j+=2) { j0 = A->U->index[j+0]; j1 = A->U->index[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_LDSD(tt.hi[0],tt.lo[0],tt.hi[0],tt.lo[0],x[j0],xl[j0],x[j1],xl[j1],A->U->value[j]); #endif } for(;j<ie;j++) { j0 = A->U->index[j+0]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],A->U->value[j]); #endif } #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],tt.hi[0],tt.lo[0]); LIS_QUAD_ADD_SSE2(y[i],yl[i],y[i],yl[i],tt.hi[1],tt.lo[1]); #endif } } else { jj0 = A->index; vv0 = A->value; #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,is,ie,j0,j1,tt,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel for private(i,j,is,ie,j0,j1,tt,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<np;i++) { tt.hi[0] = tt.hi[1] = tt.lo[0] = tt.lo[1] = 0.0; is = A->ptr[i]; ie = A->ptr[i+1]; for(j=is;j<ie-1;j+=2) { j0 = jj0[j+0]; j1 = jj0[j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_LDSD(tt.hi[0],tt.lo[0],tt.hi[0],tt.lo[0],x[j0],xl[j0],x[j1],xl[j1],vv0[j]); #endif } #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(y[i],yl[i],tt.hi[0],tt.lo[0],tt.hi[1],tt.lo[1]); #endif for(;j<ie;j++) { j0 = jj0[j+0]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(y[i],yl[i],y[i],yl[i],x[j0],xl[j0],vv0[j]); #endif } } } } #endif
/* ==== blas.c ==== */
#include "blas.h"
#include "utils.h"
#include <math.h>
#include <assert.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reshuffle a (in_c, h*stride, w*stride) tensor into (out_c, out_h, out_w)
 * blocks (space-to-depth).  forward selects the direction of the copy. */
void reorg_cpu(float *x, int out_w, int out_h, int out_c, int batch, int stride, int forward, float *out)
{
    int b,i,j,k;
    int in_c = out_c/(stride*stride);

    for(b = 0; b < batch; ++b){
        for(k = 0; k < out_c; ++k){
            for(j = 0; j < out_h; ++j){
                for(i = 0; i < out_w; ++i){
                    int in_index = i + out_w*(j + out_h*(k + out_c*b));
                    int c2 = k % in_c;
                    int offset = k / in_c;
                    int w2 = i*stride + offset % stride;
                    int h2 = j*stride + offset / stride;
                    int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b));
                    if(forward) out[out_index] = x[in_index];    // used by default for forward (i.e. forward = 0)
                    else out[in_index] = x[out_index];
                }
            }
        }
    }
}

/* Transpose x in place between (layers, size) and (size, layers) order
 * per batch item, via a temporary buffer. */
void flatten(float *x, int size, int layers, int batch, int forward)
{
    float* swap = (float*)xcalloc(size * layers * batch, sizeof(float));
    int i,c,b;
    for(b = 0; b < batch; ++b){
        for(c = 0; c < layers; ++c){
            for(i = 0; i < size; ++i){
                int i1 = b*layers*size + c*size + i;
                int i2 = b*layers*size + i*layers + c;
                if (forward) swap[i2] = x[i1];
                else swap[i1] = x[i2];
            }
        }
    }
    memcpy(x, swap, size*layers*batch*sizeof(float));
    free(swap);
}

/* c = s*a + (1-s)*b elementwise; b may be NULL (treated as zeros). */
void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c)
{
    int i;
    for(i = 0; i < n; ++i){
        c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
    }
}

/* Backward pass of weighted_sum_cpu: accumulate gradients into da, db, ds.
 * da/db may be NULL when the corresponding input needs no gradient. */
void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc)
{
    int i;
    for(i = 0; i < n; ++i){
        if(da) da[i] += dc[i] * s[i];
        if(db) db[i] += dc[i] * (1-s[i]);
        ds[i] += dc[i] * (a[i] - b[i]);
    }
}

/* max(0, src) */
static float relu(float src) {
    if (src > 0) return src;
    return 0;
}

/* Weighted shortcut (residual) connection over n extra layers.
 * out = w0*in + sum_i wi*layers_output[i], with optional RELU/SOFTMAX
 * normalization of the per-connection weights.  step maps an output
 * element to its weight index (per-layer, per-channel or per-element,
 * depending on nweights). */
void shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers,
    float **layers_output, float *out, float *in, float *weights, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
    // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
    const int layer_step = nweights / (n + 1);    // 1 or l.c or (l.c * l.h * l.w)
    int step = 0;
    if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1

    int id;
    #pragma omp parallel for
    for (id = 0; id < size; ++id) {
        int src_id = id;
        const int src_i = src_id % src_outputs;
        src_id /= src_outputs;
        int src_b = src_id;

        float sum = 1, max_val = -FLT_MAX;
        int i;
        if (weights && weights_normalization) {
            if (weights_normalization == SOFTMAX_NORMALIZATION) {
                // subtract the max for a numerically stable softmax
                for (i = 0; i < (n + 1); ++i) {
                    const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
                    float w = weights[weights_index];
                    if (max_val < w) max_val = w;
                }
            }
            const float eps = 0.0001;
            sum = eps;
            for (i = 0; i < (n + 1); ++i) {
                const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
                const float w = weights[weights_index];
                if (weights_normalization == RELU_NORMALIZATION) sum += relu(w);
                else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
            }
        }

        if (weights) {
            float w = weights[src_i / step];
            if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
            else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;

            out[id] = in[id] * w; // [0 or c or (c, h ,w)]
        }
        else out[id] = in[id];

        // layers
        for (i = 0; i < n; ++i) {
            int add_outputs = outputs_of_layers[i];
            if (src_i < add_outputs) {
                int add_index = add_outputs*src_b + src_i;
                int out_index = id;
                float *add = layers_output[i];

                if (weights) {
                    const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
                    float w = weights[weights_index];
                    if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
                    else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;

                    out[out_index] += add[add_index] * w; // [0 or c or (c, h ,w)]
                }
                else out[out_index] += add[add_index];
            }
        }
    }
}

/* Backward pass of shortcut_multilayer_cpu: propagate delta_in to the
 * input and to every attached layer, and accumulate weight_updates.
 * NOTE: grad stays 1 — the exact derivative through the weight
 * normalization was commented out in the original. */
void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers,
    float **layers_delta, float *delta_out, float *delta_in, float *weights, float *weight_updates, int nweights,
    float *in, float **layers_output, WEIGHTS_NORMALIZATION_T weights_normalization)
{
    // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
    const int layer_step = nweights / (n + 1);    // 1 or l.c or (l.c * l.h * l.w)
    int step = 0;
    if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1

    int id;
    #pragma omp parallel for
    for (id = 0; id < size; ++id) {
        int src_id = id;
        int src_i = src_id % src_outputs;
        src_id /= src_outputs;
        int src_b = src_id;

        float grad = 1, sum = 1, max_val = -FLT_MAX;
        int i;
        if (weights && weights_normalization) {
            if (weights_normalization == SOFTMAX_NORMALIZATION) {
                for (i = 0; i < (n + 1); ++i) {
                    const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
                    float w = weights[weights_index];
                    if (max_val < w) max_val = w;
                }
            }
            const float eps = 0.0001;
            sum = eps;
            for (i = 0; i < (n + 1); ++i) {
                const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)]
                const float w = weights[weights_index];
                if (weights_normalization == RELU_NORMALIZATION) sum += relu(w);
                else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
            }
        }

        if (weights) {
            float w = weights[src_i / step];
            if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
            else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;

            delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)]
            weight_updates[src_i / step] += delta_in[id] * in[id] * grad;
        }
        else delta_out[id] += delta_in[id];

        // layers
        for (i = 0; i < n; ++i) {
            int add_outputs = outputs_of_layers[i];
            if (src_i < add_outputs) {
                int add_index = add_outputs*src_b + src_i;
                float *layer_delta = layers_delta[i];
                if (weights) {
                    float *add = layers_output[i];

                    const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)]
                    float w = weights[weights_index];
                    if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
                    else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;

                    layer_delta[add_index] += delta_in[id] * w; // [0 or c or (c, h ,w)]
                    weight_updates[weights_index] += delta_in[id] * add[add_index] * grad;
                }
                else layer_delta[add_index] += delta_in[id];
            }
        }
    }
}

/* Plain shortcut: add a (w1,h1,c1) tensor into a (w2,h2,c2) output,
 * striding/sampling to match the overlapping region. */
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int stride = w1/w2;
    int sample = w2/w1;
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int i,j,k,b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < minc; ++k){
            for(j = 0; j < minh; ++j){
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] += add[add_index];
                }
            }
        }
    }
}

/* Per-filter mean over batch and spatial dims. */
void mean_cpu(float *x, int batch, int filters, int spatial, float *mean)
{
    float scale = 1./(batch * spatial);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        mean[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                mean[i] += x[index];
            }
        }
        mean[i] *= scale;
    }
}

/* Per-filter (Bessel-corrected) variance over batch and spatial dims. */
void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    float scale = 1./(batch * spatial - 1);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        variance[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                // diff*diff instead of pow(diff, 2): same value, no libm call
                float diff = x[index] - mean[i];
                variance[i] += diff * diff;
            }
        }
        variance[i] *= scale;
    }
}

/* Normalize x in place per filter: (x - mean) / sqrt(var + eps). */
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int b, f, i;
    for(b = 0; b < batch; ++b){
        for(f = 0; f < filters; ++f){
            for(i = 0; i < spatial; ++i){
                int index = b*filters*spatial + f*spatial + i;
                x[index] = (x[index] - mean[f])/(sqrt(variance[f] + .000001f));
            }
        }
    }
}

/* X[i*INCX] = ALPHA */
void const_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

/* Y *= X elementwise (strided). */
void mul_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX];
}

/* Y = X^ALPHA elementwise (strided). */
void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}

/* BLAS axpy: Y += ALPHA * X (strided). */
void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX];
}

/* BLAS scal: X *= ALPHA (strided). */
void scal_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA;
}

/* X = X*ALPHA + BETA (strided). */
void scal_add_cpu(int N, float ALPHA, float BETA, float *X, int INCX)
{
    int i;
    for (i = 0; i < N; ++i) X[i*INCX] = X[i*INCX] * ALPHA + BETA;
}

/* X = ALPHA (strided); fast memset path for the common contiguous-zero case. */
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    if (INCX == 1 && ALPHA == 0) {
        memset(X, 0, N * sizeof(float));
    }
    else {
        for (i = 0; i < N; ++i) X[i*INCX] = ALPHA;
    }
}

/* De-interleave OUT (per batch: NX values for X, then NY for Y),
 * accumulating into X and Y (either may be NULL to skip). */
void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i, j;
    int index = 0;
    for(j = 0; j < B; ++j) {
        for(i = 0; i < NX; ++i){
            if(X) X[j*NX + i] += OUT[index];
            ++index;
        }
        for(i = 0; i < NY; ++i){
            if(Y) Y[j*NY + i] += OUT[index];
            ++index;
        }
    }
}

/* Interleave X and Y into OUT (per batch: NX values of X, then NY of Y). */
void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i, j;
    int index = 0;
    for(j = 0; j < B; ++j) {
        for(i = 0; i < NX; ++i){
            OUT[index++] = X[j*NX + i];
        }
        for(i = 0; i < NY; ++i){
            OUT[index++] = Y[j*NY + i];
        }
    }
}

/* BLAS copy: Y = X (strided). */
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}

/* Z += X*Y elementwise. */
void mult_add_into_cpu(int N, float *X, float *Y, float *Z)
{
    int i;
    for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i];
}

/* Smooth-L1 (Huber-like) loss: quadratic for |diff| < 1, linear beyond. */
void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        float abs_val = fabs(diff);
        if(abs_val < 1) {
            error[i] = diff * diff;
            delta[i] = diff;
        }
        else {
            error[i] = 2*abs_val - 1;
            delta[i] = (diff > 0) ? 1 : -1;
        }
    }
}

/* L1 loss with sign gradient. */
void l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = fabs(diff);
        delta[i] = diff > 0 ? 1 : -1;
    }
}

/* Softmax cross-entropy: error only where truth is nonzero; delta = t - p. */
void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float t = truth[i];
        float p = pred[i];
        error[i] = (t) ? -log(p) : 0;
        delta[i] = t-p;
    }
}

/* Binary (logistic) cross-entropy per element; delta = t - p. */
void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float t = truth[i];
        float p = pred[i];
        error[i] = -t*log(p) - (1-t)*log(1-p);
        delta[i] = t-p;
    }
}

/* Squared-error loss; delta = t - p. */
void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = diff * diff;
        delta[i] = diff;
    }
}

/* BLAS dot: sum of X[i*INCX] * Y[i*INCY]. */
float dot_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    float dot = 0;
    for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY];
    return dot;
}

/* Temperature softmax over a strided vector, max-subtracted for stability. */
void softmax(float *input, int n, float temp, float *output, int stride)
{
    int i;
    float sum = 0;
    float largest = -FLT_MAX;
    for(i = 0; i < n; ++i){
        if(input[i*stride] > largest) largest = input[i*stride];
    }
    for(i = 0; i < n; ++i){
        float e = exp(input[i*stride]/temp - largest/temp);
        sum += e;
        output[i*stride] = e;
    }
    for(i = 0; i < n; ++i){
        output[i*stride] /= sum;
    }
}

/* Apply softmax independently to each group in each batch item. */
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int g, b;
    for(b = 0; b < batch; ++b){
        for(g = 0; g < groups; ++g){
            softmax(input + b*batch_offset + g*group_offset, n, temp, output + b*batch_offset + g*group_offset, stride);
        }
    }
}

/* Nearest-neighbor upsample by stride (forward) or accumulate back
 * down into the input (backward), scaled by scale. */
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int i, j, k, b;
    for (b = 0; b < batch; ++b) {
        for (k = 0; k < c; ++k) {
            for (j = 0; j < h*stride; ++j) {
                for (i = 0; i < w*stride; ++i) {
                    int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride;
                    int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
                    if (forward) out[out_index] = scale*in[in_index];
                    else in[in_index] += scale*out[out_index];
                }
            }
        }
    }
}

/* Clamp every element of X into [-ALPHA, ALPHA]. */
void constrain_cpu(int size, float ALPHA, float *X)
{
    int i;
    for (i = 0; i < size; ++i) {
        X[i] = fminf(ALPHA, fmaxf(-ALPHA, X[i]));
    }
}

void fix_nan_and_inf_cpu(float *input, size_t size)
{
    int i;
    for (i = 0; i < size; ++i) {
float val = input[i]; if (isnan(val) || isinf(val)) input[i] = 1.0f / i; // pseudo random value } }
mixed_tentusscher_myo_epi_2004_S2_4.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
//
// CPU implementation of a mixed ten Tusscher 2004 ventricular cell model:
// each cell is tagged via `extra_data` (0 = myocardium, nonzero = epicardium)
// and solved with the matching RHS.  Gating variables use a Rush-Larsen style
// exponential update, so the RHS functions return the NEW state in rDY_, not
// time derivatives.
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_4.h"

// Solver-facing metadata: resting potential and state-vector length.
// (GET_CELL_MODEL_DATA, INITIAL_V and NEQ come from the model header.)
GET_CELL_MODEL_DATA(init_cell_model_data) {

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Writes the 17-entry initial state for one cell, choosing myocardium or
// epicardium steady-state values from the mapping supplied in extra_data.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    static bool first_call = true;

    if(first_call) {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0) {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;  // V;   millivolt
        sv[1] = 0.f;        // M
        sv[2] = 0.75;       // H
        sv[3] = 0.75f;      // J
        sv[4] = 0.f;        // Xr1
        sv[5] = 1.f;        // Xr2
        sv[6] = 0.f;        // Xs
        sv[7] = 1.f;        // S
        sv[8] = 0.f;        // R
        sv[9] = 0.f;        // D
        sv[10] = 1.f;       // F
        sv[11] = 1.f;       // FCa
        sv[12] = 1.f;       // G
        sv[13] = 0.0002;    // Cai
        sv[14] = 0.2f;      // CaSR
        sv[15] = 11.6f;     // Nai
        sv[16] = 138.3f;    // Ki
        */

        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;  // V;   millivolt
        sv[1] = 0.f;        // M
        sv[2] = 0.75;       // H
        sv[3] = 0.75f;      // J
        sv[4] = 0.f;        // Xr1
        sv[5] = 1.f;        // Xr2
        sv[6] = 0.f;        // Xs
        sv[7] = 1.f;        // S
        sv[8] = 0.f;        // R
        sv[9] = 0.f;        // D
        sv[10] = 1.f;       // F
        sv[11] = 1.f;       // FCa
        sv[12] = 1.f;       // G
        sv[13] = 0.0002;    // Cai
        sv[14] = 0.2f;      // CaSR
        sv[15] = 11.6f;     // Nai
        sv[16] = 138.3f;    // Ki
        */

        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5574211764260,0.00129305755715058,0.779441422719268,0.779241742711666,0.000175039240857358,0.484977289081740,0.00294257507368012,0.999998344595344,1.93700269716616e-08,1.89380174481509e-05,0.999773792418493,1.00755963480393,0.999999137126184,3.41466316398601e-05,1.23162815450729,9.71224673801957,139.552422843336};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Time-steps every requested cell: num_steps explicit-Euler/Rush-Larsen steps
// of size dt, dispatching per cell to the myo or epi RHS.  Parallelized over
// cells with OpenMP (each iteration touches a disjoint sv slice).
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j) {
            // NOTE(review): mapping is indexed with the loop index i here,
            // but with sv_id in set_model_initial_conditions_cpu.  If
            // cells_to_solve reorders/subsets cells these disagree -- confirm
            // which indexing is intended.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One dt step of the myocardium model for a single cell.  RHS_cpu_myo
// returns the updated state in rDY, which is copied straight back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)  {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Myocardium right-hand side.  Computes all ionic currents for the current
// state, advances the concentrations (explicit Euler, analytic buffering) and
// the gates (Rush-Larsen exponential update), and writes the NEW state into
// rDY_.  dt is needed because the update is folded into this function.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa;
    real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK;
    real Irel; real Ileak;
    real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare; real CaSRsquare;
    real CaCurrent; real CaSRCurrent;
    real fcaold; real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr;
    real CaBuf; real bc; real cc;
    real Ak1; real Bk1;
    real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM;
    real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF;
    real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF;
    real TAU_F; real F_INF; real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
        (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
        (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
        (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
         exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    // quadratic solve for the new buffered SR calcium
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen: exact exponential relaxation toward X_INF)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only move when allowed (voltage-dependent latch)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One dt step of the epicardium model for a single cell (same copy-back
// pattern as the myocardium wrapper).
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)  {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Epicardium right-hand side.  Identical structure to RHS_cpu_myo, but with
// epicardial Gks/Gto defaults and, notably, a fitted parameter set
// (Elnaz's `parameters` array) that overrides the conductances and supplies
// arel/crel/Vleak for the SR release/leak formulation.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set overriding the defaults above (S2 scenario).
    real parameters []={14.0344988699429,0.000243427554127383,0.000161272832250911,0.000484228011827550,0.275092424538870,0.175484829191378,0.164879494363494,3.77803127027096,0.0197412874581791,1.93055058781161,1099.31582404877,0.000553709594039336,0.144015543772373,0.0199814298252655,0.00826445055600327,9.00070147931675e-06};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa;
    real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK;
    real Irel; real Ileak;
    real dNai; real dKi; real dCai; real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare; real CaSRsquare;
    real CaCurrent; real CaSRCurrent;
    real fcaold; real gold;
    real Ek; real Ena; real Eks; real Eca;
    real CaCSQN; real bjsr; real cjsr;
    real CaBuf; real bc; real cc;
    real Ak1; real Bk1;
    real rec_iK1; real rec_ipK; real rec_iNaK;
    real AM; real BM;
    real AH_1; real BH_1; real AH_2; real BH_2;
    real AJ_1; real BJ_1; real AJ_2; real BJ_2;
    real M_INF; real H_INF; real J_INF;
    real TAU_M; real TAU_H; real TAU_J;
    real axr1; real bxr1; real axr2; real bxr2;
    real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2;
    real Axs; real Bxs; real Xs_INF; real TAU_Xs;
    real R_INF; real TAU_R; real S_INF; real TAU_S;
    real Ad; real Bd; real Cd; real TAU_D; real D_INF;
    real TAU_F; real F_INF; real FCa_INF; real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
        (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
        (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
        (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
         exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // epicardium uses the fitted arel/crel/Vleak instead of fixed constants
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen: exact exponential relaxation toward X_INF)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only move when allowed (voltage-dependent latch)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
LMSolver.h
#pragma once

// NOTE(review): these include paths use backslashes and a lowercase/odd-case
// directory ("eigen\Core", "eigen\Svd").  They only resolve on
// case-insensitive filesystems with compilers that accept '\' in includes
// (MSVC); the portable spellings are <Eigen/Core>, <Eigen/Dense>,
// <Eigen/SVD>, <Eigen/Sparse>.  Left byte-identical here.
#include <eigen\Core>
#include <eigen\Dense>
#include <eigen\Svd>
#include <eigen\Sparse>

/**
* Levenberg-Marquardt Method for dense fixed-size equations
* min_x { f(x)^2 }, with f_(m,1) and x_(n,1)
*
* Classic Madsen/Nielsen-style LM: damped normal equations
* (J'J + mu*I) h = -J'f with gain-ratio (rho) controlled damping.
* Subclasses implement CalcEnergyFunc (the residual f) and optionally
* CalcJacobiFunc (defaults to forward-difference numeric Jacobian).
*/
template<int M, int N>
class CSmallLMSolver
{
public:
    typedef double real;
    typedef Eigen::Matrix<real,M,1> VecM;
    typedef Eigen::Matrix<real,N,1> VecN;
    typedef Eigen::Matrix<real,M,N> MatMN;
    typedef Eigen::Matrix<real,N,M> MatNM;
    typedef Eigen::Matrix<real,N,N> MatNN;
// NOTE(review): function-style macro with a trailing ';' in its body and no
// outer parentheses -- it only works in the exact statement forms used below
// and leaks into every including TU.  A constexpr/std::max would be safer.
#define CSmallLMSolver_Max(a,b) (a)>(b) ? (a) : (b);
public:
    // Runs up to nMaxIter LM iterations starting from xStart (updated in
    // place).  Returns the final squared residual f'f.
    // NOTE(review): the early-exit below is "return true;" from a
    // real-returning function, i.e. it returns 1.0, not the residual.
    real Optimize(VecN& xStart, int nMaxIter, bool showInfo = false)
    {
        const static real eps_1 = real(1e-12);   // gradient tolerance
        const static real eps_2 = real(1e-12);   // step tolerance
        const static real tau = real(1e-3);      // initial damping scale
        MatMN Jac;
        MatNM JacT;
        MatNN JacTJac;
        VecM f;    //f(x)
        VecN g;    //g=-J'*f
        VecN h;    //(J'J + mu * I) h = g
        VecN xnew, x;
        VecM fnew; //f(xnew)
        Eigen::LDLT<MatNN> solver;

        //initial settings
        x = xStart;
        CalcJacobiFunc(x, Jac); //J
        JacT = Jac.transpose();
        CalcEnergyFunc(x, f);   //f
        g = JacT * (-f);        //g
        if(g.norm() <= eps_1)
            return true;
        JacTJac = JacT*Jac;
        solver.compute(JacTJac);
        h = solver.solve(g);    //J'Jh = g

        //find the diag of J'J
        real v=real(2), mu=real(0);
        for(int i=0; i<JacTJac.rows(); i++)
            mu = CSmallLMSolver_Max(mu, JacTJac(i,i));
        mu *= tau;

        for(int iter=0; iter < nMaxIter; iter++)
        {
            //+mu*I
            for(int i=0; i<JacTJac.rows(); i++)
                JacTJac(i,i) += mu;
            //solve
            solver.compute(JacTJac);
            h = solver.solve(g);
            //-mu*I  (restore so the accepted-step branch can reuse JacTJac)
            for(int i=0; i<JacTJac.rows(); i++)
                JacTJac(i,i) -= mu;
            if(h.norm() <= eps_2 * (x.norm() + eps_2))
                break;
            xnew = x + h;
            CalcEnergyFunc(xnew, fnew); //fnew
            //dL = L(0) - L(h)  (predicted reduction of the LM model)
            real dL = h.dot(mu*h+g);
            real dF = f.dot(f) - fnew.dot(fnew);   // actual reduction
            real rho = dF/dL;                      // gain ratio
            if(rho > 0)
            {
                // step accepted: refresh Jacobian/gradient, shrink damping
                x = xnew;  //x
                f = fnew;  //f
                CalcJacobiFunc(x, Jac); //J
                JacT = Jac.transpose();
                g = JacT * (-f); //g
                if(g.norm() <= eps_1)
                    break;
                JacTJac = JacT * Jac;
                mu *= CSmallLMSolver_Max(real(1./3.), real(1-pow(2*rho-1,3)));
                v = 2;
            }
            else
            {
                // step rejected: grow damping geometrically
                mu *= v;
                v *= 2;
            }
            if (showInfo)
                printf("iter: %d, energy: %f, dif: %f\n", iter,
                       sqrt(f.dot(f)), h.norm() / x.norm());
        }//end for iter
        xStart = x;
        return f.dot(f);
    }
protected:
    // Residual: fx = f(x).  Must be provided by the concrete problem.
    virtual void CalcEnergyFunc(const VecN& x, VecM& fx)=0;
    //default: calc jacobi matrix numerically with forward diff
    // (x is perturbed and restored in place, hence the non-const reference)
    virtual void CalcJacobiFunc(VecN& x, MatMN& jac)
    {
        const static real delta = real(1e-8);
        VecM fx, fxp;
        CalcEnergyFunc(x, fx);
        for(int j=0; j<N; j++)
        {
            real d=real(1e-4)*x[j]; // force evaluation
            d=fabs(d);
            if(d<delta)
                d=delta;
            x[j] += d;
            CalcEnergyFunc(x, fxp);
            x[j] -= d;
            d = real(1.0)/d;
            for(int i=0; i<M; i++)
            {
                jac(i,j) = (fxp[i]-fx[i])*d;
            }
        }//end for j
    }
};

// Gauss-Newton variant with a (partial) backtracking line search.
// NOTE(review): several suspects in this class, all left byte-identical:
//  - VecN/MatMN/real/CalcJacobiFunc etc. are dependent names from the
//    templated base used unqualified -- accepted by MSVC's permissive lookup
//    but rejected under standard two-phase lookup (needs typename/this->).
//  - MatMN JacTJac(N, N) constructs a fixed M-by-N matrix with (N,N) sizes,
//    and Eigen::LDLT<MatMN> factorizes a non-square type; both only make
//    sense when M == N (likely MatNN was intended).
//  - In the alpha loop, alpha is never applied to h; the step is halved via
//    h = h * 0.5 instead, and fx1 from the last trial is what gets returned.
template<int M, int N>
class CSmallNewtonSolver : public CSmallLMSolver<M,N>
{
public:
    real Optimize(VecN& xStart, int nMaxIter, bool showInfo = false)
    {
        MatMN jac(M, N);
        MatMN JacTJac(N, N);
        VecM fx(M), fx1(M), h(N), g(N);
        Eigen::LDLT<MatMN> solver;

        //Gauss-Newton Optimization
        for (int iter = 0; iter<nMaxIter; iter++)
        {
            CalcJacobiFunc(xStart, jac);    //J
            JacTJac = jac.transpose() * jac;
            CalcEnergyFunc(xStart, fx);     //f

            //solve: J'J h =  - J' f(x)
            g = jac.transpose() * (-fx);
            solver.compute(JacTJac);
            h = solver.solve(g);

            real normv = xStart.norm();
            for (real alpha = 1; alpha > 1e-15; alpha *= 0.5)
            {
                VecN x = xStart + h;
                CalcEnergyFunc(x, fx1); //f
                if (fx1.dot(fx1) > fx.dot(fx))
                    h = h * 0.5;
                else
                {
                    xStart = x;
                    break;
                }
            }
            real normh = h.norm();

            if (showInfo)
                printf("Gauss-Newton: %d -- %f, energy: %f\n", iter,
                       normh / normv, sqrt(fx.dot(fx)));
            if (normh < (normv + real(1e-6)) * real(1e-6))
                break;
        }
        return fx.dot(fx);
    }
};

/**
* Levenberg-Marquardt Method for dense fix-variable equations
* min_x { f(x)^2 }, with f_(m,1) and x_(n,1)
*
* Same algorithm as CSmallLMSolver, but the residual dimension M is a
* runtime member (set by the subclass) while the variable count N stays a
* template parameter; residual vectors are dynamically sized.
*/
template<int N>
class CFixVarLMSolver
{
public:
    typedef double real;
    typedef Eigen::Matrix<real,-1,1> VecM;
    typedef Eigen::Matrix<real,N,1> VecN;
    typedef Eigen::Matrix<real,-1,N> MatMN;
    typedef Eigen::Matrix<real,N,-1> MatNM;
    typedef Eigen::Matrix<real,N,N> MatNN;
// (benign redefinition of the identical macro above)
#define CSmallLMSolver_Max(a,b) (a)>(b) ? (a) : (b);
public:
    // Runs up to nMaxIter LM iterations; xStart is updated in place.
    // NOTE(review): as above, the gradient early-exit "return true;" returns
    // 1.0 rather than the residual energy.
    real Optimize(VecN& xStart, int nMaxIter)
    {
        const static real eps_1 = real(1e-12);
        const static real eps_2 = real(1e-12);
        const static real tau = real(1e-3);
        MatMN Jac(M,N);
        MatNM JacT(N,M);
        MatNN JacTJac;
        VecM f(M);  //f(x)
        VecN g;     //g=-J'*f
        VecN h;     //(J'J + mu * I) h = g
        VecN xnew, x;
        VecM fnew(M); //f(xnew)
        Eigen::LDLT<MatNN> solver;

        //initial settings
        x = xStart;
        CalcJacobiFunc(x, Jac); //J
        JacT = Jac.transpose();
        CalcEnergyFunc(x, f);   //f
        g = JacT * (-f);        //g
        if(g.norm() <= eps_1)
            return true;
        JacTJac = JacT*Jac;
        solver.compute(JacTJac);
        h = solver.solve(g);    //J'Jh = g

        //find the diag of J'J
        real v=real(2), mu=real(0);
        for(int i=0; i<JacTJac.rows(); i++)
            mu = CSmallLMSolver_Max(mu, JacTJac(i,i));
        mu *= tau;

        for(int iter=0; iter < nMaxIter; iter++)
        {
            //+mu*I
            for(int i=0; i<JacTJac.rows(); i++)
                JacTJac(i,i) += mu;
            //solve
            solver.compute(JacTJac);
            h = solver.solve(g);
            //-mu*I
            for(int i=0; i<JacTJac.rows(); i++)
                JacTJac(i,i) -= mu;
            if(h.norm() <= eps_2 * (x.norm() + eps_2))
                break;
            xnew = x + h;
            CalcEnergyFunc(xnew, fnew); //fnew
            //dL = L(0) - L(h)
            real dL = h.dot(mu*h+g);
            real dF = f.dot(f) - fnew.dot(fnew);
            real rho = dF/dL;
            if(rho > 0)
            {
                x = xnew;  //x
                f = fnew;  //f
                CalcJacobiFunc(x, Jac); //J
                JacT = Jac.transpose();
                g = JacT * (-f); //g
                if(g.norm() <= eps_1)
                    break;
                JacTJac = JacT * Jac;
                mu *= CSmallLMSolver_Max(real(1./3.), real(1-pow(2*rho-1,3)));
                v = 2;
            }
            else
            {
                mu *= v;
                v *= 2;
            }
        }//end for iter
        xStart = x;
        //return the final energy  (ldp::sqr is an external project helper)
        return ldp::sqr( f.norm() );
    }
protected:
    // Residual: fx = f(x); fx must be sized M by the subclass contract.
    virtual void CalcEnergyFunc(const VecN& x, VecM& fx)=0;
    //default: calc jacobi matrix numerically with forward diff
    virtual void CalcJacobiFunc(VecN& x, MatMN& jac)
    {
        const static real delta = real(1e-8);
        VecM fx(M), fxp(M);
        CalcEnergyFunc(x, fx);
        for(int j=0; j<N; j++)
        {
            real d=real(1e-4)*x[j]; // force evaluation
            d=fabs(d);
            if(d<delta)
                d=delta;
            x[j] += d;
            CalcEnergyFunc(x, fxp);
            x[j] -= d;
            d = real(1.0)/d;
            for(int i=0; i<M; i++)
            {
                jac(i,j) = (fxp[i]-fx[i])*d;
            }
        }//end for j
    }
protected:
    int M;   // runtime residual dimension; must be set by the subclass
};

/**
* Levenberg-Marquardt Method for dense equations
* min_x { f(x)^2 }, with f_(m,1) and x_(n,1)
*
* Fully dynamic-size LM with optional box bounds (SetBound).  Here
* CalcEnergyFunc returns the scalar energy directly, so it need not equal
* f.dot(f).
*/
class CDenseLMSolver
{
public:
    typedef double real;
    typedef Eigen::Matrix<real,-1,1> DVec;
    typedef Eigen::Matrix<real,-1,-1> DMat;
#define CSmallLMSolver_Max(a,b) (a)>(b) ? (a) : (b);
public:
    CDenseLMSolver()
    {
        useBound = false;
    }
    // Runs up to nMaxIter LM iterations; xStart is updated in place and
    // clamped to [x_lower, x_upper] when SetBound was called.
    // NOTE(review): two suspects, left byte-identical:
    //  - the gradient early-exit "return true;" returns 1.0, not the energy;
    //  - after bound clamping, "h = xnew - h;" looks like it was meant to be
    //    "h = xnew - x;" (the actually-applied step) -- confirm.
    real Optimize(DVec& xStart, int nMaxIter, bool isInfoShow=false)
    {
        const static real eps_1 = real(1e-12);
        const static real eps_2 = real(1e-12);
        const static real tau = real(1e-3);
        DMat Jac(M,N);
        DMat JacT(N,M);
        DMat JacTJac(N,N);
        DVec f(M);  //f(x)
        DVec g(N);  //g=-J'*f
        DVec h(N);  //(J'J + mu * I) h = g
        DVec xnew(N), x(N);
        DVec fnew(M); //f(xnew)
        Eigen::LDLT<DMat> solver;

        //initial settings
        x = xStart;
        CalcJacobiFunc(x, Jac); //J
        JacT = Jac.transpose();
        real f_energy = CalcEnergyFunc(x, f); //f
        g = JacT * (-f); //g
        if(g.norm() <= eps_1)
            return true;
        JacTJac = JacT*Jac;
        solver.compute(JacTJac);
        h = solver.solve(g); //J'Jh = g

        //find the diag of J'J
        real v=real(2), mu=real(0);
        for(int i=0; i<JacTJac.rows(); i++)
            mu = CSmallLMSolver_Max(mu, JacTJac(i,i));
        mu *= tau;

        int iter = 0;
        for(iter=0; iter < nMaxIter; iter++)
        {
            //+mu*I
            for(int i=0; i<JacTJac.rows(); i++)
                JacTJac(i,i) += mu;
            //solve
            solver.compute(JacTJac);
            h = solver.solve(g);
            //-mu*I
            for(int i=0; i<JacTJac.rows(); i++)
                JacTJac(i,i) -= mu;
            if(h.norm() <= eps_2 * (x.norm() + eps_2))
                break;
            xnew = x + h;
            if (useBound)
            {
                xnew = xnew.cwiseMin(x_upper);
                xnew = xnew.cwiseMax(x_lower);
                h = xnew - h;
            }
            real fnew_energy = CalcEnergyFunc(xnew, fnew); //fnew
            //dL = L(0) - L(h)
            real dL = h.dot(mu*h+g);
            real dF = f_energy - fnew_energy;
            //real dF = f.dot(f) - fnew.dot(fnew);
            real rho = dF/dL;
            if(rho > 0)
            {
                x = xnew;  //x
                f = fnew;  //f
                f_energy = fnew_energy;
                CalcJacobiFunc(x, Jac); //J
                JacT = Jac.transpose();
                g = JacT * (-f); //g
                if(g.norm() <= eps_1)
                    break;
                JacTJac = JacT * Jac;
                mu *= CSmallLMSolver_Max(real(1./3.), real(1-pow(2*rho-1,3)));
                v = 2;
            }
            else
            {
                mu *= v;
                v *= 2;
            }
            if (isInfoShow && iter % 10 == 0)
                printf("iter: %d, energy: %f, dif: %f\n", iter,
                       sqrt(f.dot(f)), h.norm()/x.norm());
        }//end for iter
        if (isInfoShow)
            printf("iter: %d, energy: %f, dif: %f\n", iter,
                   sqrt(f.dot(f)), h.norm() / x.norm());
        xStart = x;
        return f_energy;
    }

    // Enables box constraints; copies are kept, both vectors must be size N.
    void SetBound(DVec& xMin, DVec& xMax)
    {
        x_lower = xMin;
        x_upper = xMax;
        useBound = true;
    }
protected:
    // Residual AND scalar energy: fills fx and returns the energy value.
    virtual real CalcEnergyFunc(const DVec& x, DVec& fx)=0;
    //default: calc jacobi matrix numerically with forward diff
    virtual void CalcJacobiFunc(DVec& x, DMat& jac)
    {
        const static real delta = real(1e-8);
        DVec fx(M), fxp(M);
        CalcEnergyFunc(x, fx);
        for(int j=0; j<N; j++)
        {
            real d=real(1e-4)*x[j]; // force evaluation
            d=fabs(d);
            if(d<delta)
                d=delta;
            x[j] += d;
            CalcEnergyFunc(x, fxp);
            x[j] -= d;
            d = real(1.0)/d;
            for(int i=0; i<M; i++)
            {
                jac(i,j) = (fxp[i]-fx[i])*d;
            }
        }//end for j
    }
protected:
    int M;          // residual dimension (set by subclass)
    int N;          // variable dimension (set by subclass)
    bool useBound;
    DVec x_lower, x_upper;
};

// Sparse LM: the subclass declares the Jacobian sparsity pattern once
// (DefineJacobiStructure) so J'J can be factorized with a SimplicialCholesky
// whose symbolic analysis is reused across iterations; the numeric Jacobian
// default uses graph-colored forward differences (columns with disjoint row
// support share one residual evaluation).
// NOTE(review): this chunk is truncated -- CalcJacobiFunc (and the member
// declarations for m_jacobi / m_jacobiT, the `nextjj` label, etc.) continue
// past the end of this excerpt.
class CSparseLMSolver
{
public:
    typedef double real;
    typedef Eigen::Matrix<real,-1,1> Vec;
    typedef Eigen::SparseMatrix<real, Eigen::ColMajor> SpMat;
#define CSmallLMSolver_Max(a,b) (a)>(b) ? (a) : (b);
public:
    real Optimize(Vec& xStart, int nMaxIter, bool showInfo=true)
    {
        const static real eps_1 = real(1e-12);
        const static real eps_2 = real(1e-12);
        const static real tau = real(1e-3);

        //define jacobi structure
        DefineJacobiStructure(m_jacobi, m_jacobiT);

        SpMat JacTJac;
        Vec f(m_jacobi.rows()); //f(x)
        Vec g(m_jacobi.cols()); //g=-J'*f
        Vec h(m_jacobi.cols()); //(J'J + mu * I) h = g
        Eigen::VectorXi diagPos(m_jacobi.cols());
        Vec diagKept(m_jacobi.cols());
        Vec xnew(xStart.size());
        Vec fnew(m_jacobi.rows()); //f(xnew)

        //define structure of J'J  (symbolic product fixes the sparsity)
        JacTJac = m_jacobiT * m_jacobiT.transpose();
        Eigen::SimplicialCholesky<SpMat> solver(JacTJac);

        //initial settings
        CalcJacobiFunc(xStart,m_jacobi, m_jacobiT); //J
        CalcEnergyFunc(xStart, f);                  //f
        g = m_jacobiT * (-f);                       //g
        if(g.norm() <= eps_1)
            return f.dot(f);
        FastAtAGivenStructure(m_jacobi, m_jacobiT, JacTJac);
        //JacTJac = m_jacobiT * m_jacobiT.transpose();
        solver.factorize(JacTJac);
        h = solver.solve(g); //J'Jh = g

        //find the diag of J'J
        real v=real(2), mu=real(0);
        Eigen::Diagonal<SpMat> diag(JacTJac);
        for(int i=0; i<diag.size(); i++)
            mu = CSmallLMSolver_Max(mu, diag[i]);
        mu *= tau;

        for(int iter=0; iter < nMaxIter; iter++)
        {
            //+mu*I
            diag += Vec(diag.size()).setConstant(mu);
            //solve
            solver.factorize(JacTJac);
            h = solver.solve(g);
            //-mu*I
            diag -= Vec(diag.size()).setConstant(mu);
            if(h.norm() <= eps_2 * (xStart.norm() + eps_2))
                break;
            xnew = xStart + h;
            CalcEnergyFunc(xnew, fnew); //fnew
            //dL = L(0) - L(h)
            real dL = h.dot(mu*h+g);
            real dF = f.dot(f) - fnew.dot(fnew);
            real rho = dF/dL;
            if(rho > 0)
            {
                xStart = xnew; //x
                f = fnew;      //f
                CalcJacobiFunc(xStart,m_jacobi, m_jacobiT); //J
                g = m_jacobiT * (-f); //g
                if(g.norm() <= eps_1)
                    break;
                FastAtAGivenStructure(m_jacobi, m_jacobiT, JacTJac);
                //JacTJac = m_jacobiT * m_jacobiT.transpose();
                mu *= CSmallLMSolver_Max(real(1./3.), real(1-pow(2*rho-1,3)));
                v = 2;
            }
            else
            {
                mu *= v;
                v *= 2;
            }
            // NOTE(review): the "%ef energy" slot is fed rho (the gain
            // ratio), not the energy -- confirm the intended diagnostics.
            if((iter+1)%10==0 && showInfo)
                printf("L-M:%d dif: %ef energy: %ef mu:%ef\n", iter,
                       h.norm()/(eps_2+xStart.norm()), rho, mu);
        }//end for iter
        return f.dot(f);
    }
protected:
    //define the structure of jac and jacT
    virtual void DefineJacobiStructure(SpMat& jac, SpMat& jacT)=0;
    virtual void CalcEnergyFunc(const Vec& x, Vec& fx)=0;
    //default: calc jacobi matrix numerically with forward diff
    //Note that structure of both jac and jacT should be given
    //children classes should fill both jac and jacT
    // NOTE(review): `register` below was removed in C++17; fine for C++14
    // and earlier, an error under -std=c++17.  raw malloc/free (no RAII)
    // also means a throwing CalcEnergyFunc leaks the scratch buffers.
    virtual void CalcJacobiFunc(Vec& pTest, SpMat& jac, SpMat& jacT)
    {
        Vec p = pTest;
        const int nobs = jac.rows();
        const int nvars = jac.cols();

        register int i, j, jj, k;
        register real d;
        int ii, m, *jcol, *varlist, *coldone, forw;
        int *vidxs, *ridxs;
        real *tmpd;
        real delta;

        /* retrieve problem-specific information passed in *dat */
        Vec hx(nobs), hxx(nobs);
        forw=1;
        delta=real(1e-8);

        CalcEnergyFunc(p, hx);//hx = f(p)

        jcol=(int *)malloc(nobs*sizeof(int)); /* keeps track of measurements influenced by the set of variables currently in "varlist" below */
        for(i=0; i<nobs; ++i) jcol[i]=-1;

        vidxs=(int *)malloc(2*nobs*sizeof(int));
        ridxs=vidxs+nobs;

        varlist=(int *)malloc(nvars*sizeof(int)); /* stores indices of J's columns which are computed with the same "func" call */
        coldone=(int *)malloc(nvars*sizeof(int)); /* keeps track of J's columns which have been already computed */
        memset(coldone, 0, nvars*sizeof(int)); /* initialize to zero */

        tmpd=(real *)malloc(nvars*sizeof(real));

        for(j=0; j<nvars; ++j)
        {
            real scl;

            if(coldone[j]) continue; /* column j already computed */

            //for(i=0; i<nobs; ++i) jcol[i]=-1;
            k=FindColIndices(jac, j, vidxs, ridxs);
            for(i=0; i<k; ++i) jcol[ridxs[i]]=j;
            varlist[0]=j; m=1; coldone[j]=1;

            for(jj=j+1; jj<nvars; ++jj)
            {
                if(coldone[jj]) continue; /* column jj already computed */

                k=FindColIndices(jac, jj, vidxs, ridxs);
                for(i=0; i<k; ++i)
                    if(jcol[ridxs[i]]!=-1) goto nextjj;

                if(k==0) { coldone[jj]=1; continue; } /* all zeros column, ignore */

                /* column jj does not clash with previously considered ones, mark it */
                for(i=0; i<k; ++i) jcol[ridxs[i]]=jj;
varlist[m++]=jj; coldone[jj]=1; nextjj: continue; } for(k=0; k<m; ++k) { /* determine d=max(SPLM_DELTA_SCALE*|p[varlist[k]]|, delta), see HZ */ d=real(1e-4)*p[varlist[k]]; // force evaluation d=fabs(d); if(d<delta) d=delta; tmpd[varlist[k]]=d; p[varlist[k]]+=d; } CalcEnergyFunc(p, hxx);// hxx=f(p+d) if(forw) { for(k=0; k<m; ++k) p[varlist[k]]-=tmpd[varlist[k]]; /* restore */ scl=1.0; } else { // central for(k=0; k<m; ++k) p[varlist[k]]-=2*tmpd[varlist[k]]; CalcEnergyFunc(p, hx);// hx=f(p-d) for(k=0; k<m; ++k) p[varlist[k]]+=tmpd[varlist[k]]; /* restore */ scl=0.5; // 1./2. } for(k=0; k<m; ++k) { d=tmpd[varlist[k]]; d=scl/d; /* invert so that divisions can be carried out faster as multiplications */ jj=FindColIndices(jac, varlist[k], vidxs, ridxs); for(i=0; i<jj; ++i) { ii=ridxs[i]; jac.valuePtr()[vidxs[i]]=(hxx[ii]-hx[ii])*d; jcol[ii]=-1; /* restore */ } } }//end for jj //calc FastTransGivenStructure(jac, jacT); free(tmpd); free(coldone); free(varlist); free(vidxs); free(jcol); } protected: int FindColIndices(const SpMat& A, int cid, int* vidx, int* ridx) { int ns = A.outerIndexPtr()[cid], ne = A.outerIndexPtr()[cid+1]; int k=0; for(int i=ns; i<ne; i++,k++) { ridx[k] = A.innerIndexPtr()[i]; vidx[k] = i; } return k; } void FastTransGivenStructure(const Eigen::SparseMatrix<real>& A, Eigen::SparseMatrix<real>& At) { Eigen::VectorXi positions(At.outerSize()); for(int i=0; i<At.outerSize(); i++) positions[i] = At.outerIndexPtr()[i]; for (int j=0; j<A.outerSize(); ++j) { for (Eigen::SparseMatrix<double>::InnerIterator it(A, j); it; ++it) { int i = it.index(); int pos = positions[i]++; At.valuePtr()[pos] = it.value(); } } } void FastAtAGivenStructure(const Eigen::SparseMatrix<real>& A, const Eigen::SparseMatrix<real>& At, Eigen::SparseMatrix<real>& AtA) { const static int nThread = 1; //omp_set_num_threads(nThread); Eigen::VectorXd Tmps[nThread]; Eigen::VectorXi Marks[nThread]; for(int i=0; i<nThread; i++) { Tmps[i].resize(AtA.innerSize()); 
Marks[i].resize(AtA.innerSize()); Marks[i].setZero(); } //#pragma omp parallel for for(int j=0; j<AtA.outerSize(); j++) { int tid = 0;//omp_get_thread_num(); Eigen::VectorXd& Tmp = Tmps[tid]; Eigen::VectorXi& Mark = Marks[tid]; for(Eigen::SparseMatrix<double>::InnerIterator it_A(A, j); it_A; ++it_A) { int k = it_A.index(); double v_A = it_A.value(); for(Eigen::SparseMatrix<double>::InnerIterator it_At(At, k); it_At; ++it_At) { int i = it_At.index(); double v_At = it_At.value(); if(!Mark[i]) { Mark[i] = 1; Tmp[i] = v_A*v_At; } else Tmp[i] += v_A*v_At; }//end for it_At }//end for it_A for(Eigen::SparseMatrix<double>::InnerIterator it(AtA, j); it; ++it) { int i = it.index(); it.valueRef() = Tmp[i]; Mark[i] = 0; } }//end for i } protected: SpMat m_jacobi; SpMat m_jacobiT; }; class CDenseNewtonSolver :public CDenseLMSolver { public: real Optimize(DVec& xStart, int nMaxIter, bool showInfo = false) { DMat jac(M, N); DMat JacTJac(N,N); DVec fx(M), fx1(M), h(N), g(N); Eigen::LDLT<DMat> solver; real f_energy = 0; //Gauss-Newton Optimization for (int iter = 0; iter<nMaxIter; iter++) { CalcJacobiFunc(xStart, jac); //J JacTJac = jac.transpose() * jac; f_energy = CalcEnergyFunc(xStart, fx); //f //solve: J'J h = - J' f(x) g = jac.transpose() * (-fx); solver.compute(JacTJac); h = solver.solve(g); real normv = xStart.norm(); for (real alpha = 1; alpha > 1e-15; alpha *= 0.5) { DVec x = xStart + h; if (useBound) { x = x.cwiseMin(x_upper); x = x.cwiseMax(x_lower); } real f1_energy = CalcEnergyFunc(x, fx1); //f //if (f1_energy > f_energy) if (fx1.dot(fx1) > fx.dot(fx)) h = h * 0.5; else { xStart = x; break; } } real normh = h.norm(); if (showInfo) printf("Gauss-Newton: %d -- %f, energy: %f\n", iter, normh / normv, sqrt(fx.dot(fx))); if (normh < (normv + real(1e-6)) * real(1e-6)) break; } return f_energy; } }; class CSparseNewtonSolver:public CSparseLMSolver { public: real Optimize(Vec& xStart, int nMaxIter, bool showInfo=true) { //define jacobi structure DefineJacobiStructure(m_jacobi, 
m_jacobiT); SpMat JacTJac; Vec fx(m_jacobi.rows()), h(m_jacobi.cols()), g(m_jacobi.cols()), fx1(m_jacobi.rows()); //define structure of J'J JacTJac = m_jacobiT * m_jacobi; Eigen::SimplicialCholesky<SpMat> solver; solver.analyzePattern(JacTJac.triangularView<Eigen::Lower>()); //Gauss-Newton Optimization for(int iter=0; iter<nMaxIter; iter++) { CalcJacobiFunc(xStart, m_jacobi, m_jacobiT); //J //JacTJac = m_jacobiT * m_jacobi;//J'J FastAtAGivenStructure(m_jacobi, m_jacobiT, JacTJac); CalcEnergyFunc(xStart, fx); //f //solve: J'J h = - J' f(x) g = m_jacobiT * (-fx); solver.factorize(JacTJac.triangularView<Eigen::Lower>()); h = solver.solve(g); real normv = xStart.norm(); double old_energy = fx.dot(fx); for (real alpha = 1; alpha > 1e-15; alpha *= 0.5) { Vec x = xStart + h; CalcEnergyFunc(x, fx1); //f double new_energy = fx1.dot(fx1); if (new_energy > old_energy) h = h * 0.5; else { xStart = x; break; } } real normh = h.norm(); if(showInfo) printf("Gauss-Newton: %d -- %f\n", iter, normh/normv); if(normh < (normv+real(1e-6)) * real(1e-6)) break; } return fx.dot(fx); } };
GB_unop__identity_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__(none))
// op(A') function:  GB (_unop_tran__identity_uint8_uint8)

// C type:   uint8_t
// A type:   uint8_t
// cast:     uint8_t cij = aij
// unaryop:  cij = aij

// The macros below parameterize the generic kernel templates (such as
// GB_unop_transpose.c, included at the bottom) for this type/op pair.

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// NOTE: this apply kernel is excluded from compilation (#if 0) in this
// generated variant; only the transpose kernel below is built.

#if 0

GrB_Info GB (_unop_apply__(none))
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared template does the work, driven by the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
opencl_agilekeychain_fmt_plug.c
/* 1Password Agile Keychain cracker patch for JtR. Hacked together during * July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> and * Copyright (c) 2012 Dhiru Kholia <dhiru.kholia at gmail.com>, and it is * hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This software is based on "agilekeychain" project but no actual code is * borrowed from it. * * "agilekeychain" project is at https://bitbucket.org/gwik/agilekeychain */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_agilekeychain; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_agilekeychain); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "misc.h" #include "aes.h" #include "common-opencl.h" #include "options.h" #include "jumbo.h" #define FORMAT_LABEL "agilekeychain-opencl" #define FORMAT_NAME "1Password Agile Keychain" #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL AES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 #define SALTLEN 8 #define CTLEN 1040 typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } keychain_password; typedef struct { uint32_t v[16/4]; } keychain_hash; typedef struct { int iterations; int outlen; uint8_t length; uint8_t salt[SALTLEN]; } keychain_salt; static int *cracked; static int any_cracked; static struct fmt_tests keychain_tests[] = { 
{"$agilekeychain$2*1000*8*7146eaa1cca395e5*1040*e7eb81496717d35f12b83024bb055dec00ea82843886cbb8d0d77302a85d89b1d2c0b5b8275dca44c168cba310344be6eea3a79d559d0846a9501f4a012d32b655047673ef66215fc2eb4e944a9856130ee7cd44523017bbbe2957e6a81d1fd128434e7b83b49b8a014a3e413a1d76b109746468070f03f19d361a21c712ef88e05b04f8359f6dd96c1c4487ea2c9df22ea9029e9bc8406d37850a5ead03062283a42218c134d05ba40cddfe46799c931291ec238ee4c11dc71d2b7e018617d4a2bf95a0c3c1f98ea14f886d94ee2a65871418c7c237f1fe52d3e176f8ddab6dfd4bc039b6af36ab1bc9981689c391e71703e31979f732110b84d5fccccf59c918dfcf848fcd80c6da62ced6e231497b9cbef22d5edca439888556bae5e7b05571ac34ea54fafc03fb93e4bc17264e50a1d04b688fcc8bc715dd237086c2537c32de34bbb8a29de0208800af2a9b561551ae6561099beb61045f22dbe871fab5350e40577dd58b4c8fb1232f3f85b8d2e028e5535fd131988a5df4c0408929b8eac6d751dcc698aa1d79603251d90a216ae5e28bffc0610f61fefe0a23148dcc65ab88b117dd3b8d311157424867eb0261b8b8c5b11def85d434dd4c6dc7036822a279a77ec640b28da164bea7abf8b634ba0e4a13d9a31fdcfebbdbe53adcdf2564d656e64923f76bc2619428abdb0056ce20f47f3ece7d4d11dc55d2969684ca336725561cb27ce0504d57c88a2782daccefb7862b385d494ce70fef93d68e673b12a68ba5b8c93702be832d588ac935dbf0a7b332e42d1b6da5f87aed03498a37bb41fc78fcdbe8fe1f999fe756edf3a375beb54dd508ec45af07985f1430a105e552d9817106ae12d09906c4c28af575d270308a950d05c07da348f59571184088d46bbef3e7a2ad03713e90b435547b23f340f0f5d00149838d9919d40dac9b337920c7e577647fe4e2811f05b8e888e3211d9987cf922883aa6e53a756e579f7dff91c297fcc5cda7d10344545f64099cfd2f8fd59ee5c580ca97cf8b17e0222b764df25a2a52b81ee9db41b3c296fcea1203b367e55d321c3504aeda8913b0cae106ccf736991030088d581468264b8486968e868a44172ad904d97e3e52e8370aaf52732e6ee6cc46eb33a901afc6b7c687b8f6ce0b2b4cdfe19c7139615195a052051becf39383ab83699a383a26f8a36c78887fe27ea7588c0ea21a27357ff9923a3d23ca2fb04ad671b63f8a8ec9b7fc969d3bece0f5ff19a40bc327b9905a6de2193ffe3aa1997e9266205d083776e3b94869164abcdb88d64b8ee5465f7165b75e1632abd364a24bb1426889955b8f0354f75c6fb40e254f7de53d8ef7fee9644bf2ebccd934a72bb1c
c9c19d354d66996acbddd60d1241657359d9074a4b313b21af2ee4f10cf20f4122a5fad4ee4f37a682ffb7234bea61985d1ad130bfb9f4714461fb574dbf851c*1000*8*c05f3bc3e7f3cad7*1040*f3e3d091b64da1529b04b2795898b717faad59f7dae4bda25e6e267c28a56a7702e51991b2a3fb034cdda2d9bfd531dfd2c3af00f39fdfe8bcbdde02ab790415bcf071d133b15f647f55ff512730ae4914ce20b72184c827f6350ac768b00c9eab0e3322e084bb3e9e9439a10030950f5504dcc4f7ba614b27fde99bd0d743a58341e90ec313395486eb8068df205b7bdf25134ed97dd2e2883d7eb3e63b659602ada765084a69d7ed8fc55b60aa67718cc9e5bf31ab8f3029b32a4b001071848d2b76b5f4b921d2169ca287e9e78ecd904d040c817c7c7cde4ba8510b462e139c16519962ca0adb7d5f89d431cd4541a9a7aaec8d799697f4d3947d87884bed32ada13db725c72ab6450ac8fe989a94917cca784bcf6ffbe756f19d4e8897e0f80d8c318e13e5b30fc356646aaf038a952b0781f12dfef1f4bd6922ae05a573eeff4dbb064cfbb0fd62962a6a53a8de308da2b8e83baebfe261cb127f874a5eff3f05cda123ab2ba559cf444ce33b6845f4c902733b8982044151a8aa1859769082ade5928f2d4f616ce972ae8dde1f2be37d496ad16057008dfe678c75cbdc53db25ed311edbcf8b2a73bcd2809f6bd1d389aaeed82a75fa15676d08aa5390efdc189c180be6a52ec5a7371304d26e477039197671377d1ea3d6ee41e68a42348a4fe9a1d2400eaeba8ed0a7419b9694d780456d96378c00318a5be0f41afa887476b3bebb7cf30d61ca8fc77de35671a3053a517aa39444e01e1752da3146dc97eec5849d6f025c3d4bc6e0499b901f629d8a081ad35ed33602cbef5e9a68f090170fcc1f285eb094e3dc619740a067fd2aeeb20abbb17926c3ad097f3f0bad4de540d1829a985cd7e700100622ec47da046071c11a1597e5f093268b4ed79ffcf2450b9ba2b649b932fbce912bdb4da010581bd9c731be792c8f75177f6c8c4e1756d63a1491a8aae4bb11beeca118e7d08073b500dd82b81e4bdbeb15625afca8f1c8e06b2360da972587516ef62e91d1d9aad90e62226d53363bff318f5af21f69c234731ac22b09506a1b807d2366e88905668d960c7963daa93046e9a56db1d7a437e9a37aa7a2945197265478b264ec14d383030ef73504fd26d4be9e72ebddb14a00bf6bd66a3adaa1d17cada378a2b0bc852f961af52333f7966f8a60738dfd47e79ce537082f187117ffd31f54f53356b671154dfa245671c4cd054c1a8d303a202fccfae6d3f9e3646838cef38703b5e660b5ce7679f5898d801908f90092dbec335c98e4002041287fe9bfa7d7828a29ab24
0ec2cedc9fa12cfd7c3ef7b61dad4fbf2ef9c0a904dbde1b3792fb5178607608dc9fc2fbc85addf89fa3df94317e729810b508356b5bb176cdb022afb0ec5eeff4d5081b66733d1be1b54cc4f080bfc33187663b5ab185472b35dc8812e201472e6af376c43ee23aa2db6cd04bddd79b99b0c28c48a5ae", "openwall"}, {"$agilekeychain$1*1000*8*54434b3047723444*1040*316539685a36617546544a61466e35743970356559624464304467394a4a41615459594a6b66454c5462417a7a694b5751474e4748595036344f3945374b414b676b6b7278673658794e63734a316c48656b496a3156346a544c6861797537347032466b4d6b416d31704a6b5063547a44703152544f72696e6e38347732597672774f6476414c70346462595a7678656b6e5958716b7a61746d5874514e575965564735627a437578584e4a573050567939413073306c377a4d726e6d576a6655424455394f4934696c48454f4d536e635567393950686d4171364f76747749446130454c6d74783069704d30456d45374f56736e486a5534667877327a526e52596e55454452393544437042646e6739355938714836584968664c4d7a726a4f63544c6858385141464c71565463664270493761664d633055447879613169456a72664479346438305641417054754775477a475266766c4774543668673848624d31636c37624e73743549634457655375507138535139396c4c39364c4f6f757a43305535586161364b47676a61713971394459526a78744e547459797a6a57715a3575534364487a4430306d4e4e39483277674c733238726463616d4f5146467957374234727252774b6d6161664b6d67414d5854496444665848684c376c6c776d47477a4b57566d5a3646346e775441446f3659745038646d336b6370494d50676742797a41325630716e794833793237494152496477556e4d6c4751497367346672635364486e6e71504f6e6264575953584462586c6e573947347a567163535333366e3253504d65656b45483841544f6952384d6170724471706c4a307863713653707265624f544a4d5139377562454a334b776e4879746a37704e37694557484d69696d436f484973613443754d484b4f51484833545a364654694a6d31783061665536796c444f7257666964397243444f684d305a324c6b75693953716664354b435963703559354978757a64354a755158394136663744435a674e4c73484a7935737a707739724c783077316631637349757a6d696252576244396a537730593143633348385a775734534b646569684f634f4c35323364734b7179625750364b76344a4a56626c4f727069366f575a386432745375684c464e42643173445a6a50745743696e66
6a4458325058644d57654c596d326f5763516a7951524a566372354d4d58435877765172596b734c59354476455156746d75504830444a4e47624e31524f4d544b4a6b4d675835305a7a56736758794c475057714e78496452725269484c75424f4d6d793550677277727453597045566e304c5642764c5a6732504c7a4e71584c4c67634979637369554a3446497655795a78583547306b365a4e337477786c7961796b4d787463796971596f516fcb3584235d7ecde5f8b7bc2b8f1e9e2e*46c3b75f6e4cf139e92f683f32107271", "123"}, {"$agilekeychain$1*1000*8*7a697868444e7458*1040*773954704874444d4d523043546b44375135544f74675a754532624a45794848305949436e4e724d336c524c39316247426a7843317131614152736d50724c6474586a4d4d445954786c31376d363155437130777a414d36586c7045555457424a5a436a657541456742417961654472745a73576e4b7a7a344d547043567846526655524b4339573631756f3850465a3878306b7176644c4253787071764c58376e716a50674f526d4a4e4b546e3359575175614b304a3964756f756935675a77544f4e6770654855776f79553465786e41364d6376496b7651624762424d62756746796a6753514c37793069783869683773454c533559365946584f545246616d48495730464e634d42466e51367856797a4368517335674a755972434b545944633270764e54775879563542776675386b6e4462506b743138694a756d63447134745361526a32373167366e787375514e346a73574e77796b4b49376d3677653448754c364b5a41514633626e71786130634458544e484a436551386e7679304b786d73346f774a383268665167596b466e39317a307269714434546d4d6173416e344b6a74455a584846526a6659746742504262495958386336755241386c496633417666696d7a5036425745757461736b684574794a5230436d50466d4b536375764674674562315679766a43453077356e614b476d345849395a726b7037626153496b6a66634f355261795157645941487731516f564c6764516d4e3074394b3839526341626f6b6b38324465497068624553646f4177786e6f68347779523338394f4e6561315271635236374d424d695978304b336b4a6966776e74614f4b43483237434b596a6630774e79394a4b7153714a48616b4b364455596a454b31433767786a72303450706d44666373574c5a61324f335852474b756c456b76483349754e3156654f417342324d6f75346d4b78774e43424863566e344c4c6c6c6d4e446b617550415a6f3337764f55484b4156344d4769336267344f4737794c354c5567636a565a6b7369616730383377744d695134
31333032305a4a3747794944714d67396a5651444132424e79507a34726d346c333552757a764b6c543073437562534376714f346a5939784a546f683358517348623378716677313231383261685357743236455a6a6b6674365870554642386436574c374430635177347278736a744a6e463530756365684c7779497557366550356936514e704e4863353863437165397163496146794a726555714c623438543235396371416154326c66375276746e3550727453306b7042335961364239586c3359384b464865564e677636537234414e4d6c55583867456376686e43646e6e776a6f656d7152613453725148503462744b4a334565714f6e624a774a65623258552fff2bf0505a0bc88b9cbc9073a74586*a6f6556c971bd3ad40b52751ba025713", ""}, {"$agilekeychain$1*1000*8*7a65613743636950*1040*524a397449393859696b4a576e437763716a574947544a6d306e32474442343355764a7a6948517a45686d7569636631514745347448424e4e6b32564239656a55596f724671547638736d4e66783949504b6f38746b6f49426d4d6b794c7a6d3077327639365a4b515934357774664a477247366b5539486135495863766845714146317458356b725a6a50376f726e55734b3136533756706a4b42516165656a50336e4558616450794f59506f4771347268454730784555485a4f5a4772526a76354f45417470616258375a386436474b366f7653583257335939516d4f5364446a414b674e467a31374f716d73516b3362795776305a414a314f63324d616a6c6472413939443879414c523733794c47467654734d7a6a4734733461674353357a4456527841486233646d446e797448696837377364784344704831784f6a5975666168626b5534796678576c59584d4b3448704a784a4f675a6d7672636b5a4b567071445a345a376648624b55414b7262694972384531336c7a6875725a6f44627571775361774b66417743336230614e4166564954334a6c3477666b4254374f747565394b32667266566d3263416a656c79416c45724b3035504a4e42307a33303632483466664272705765415a4f3552416a36544e5a54415a5976666a4b53675a68493071394a6563426964544a4f564d304a773976394944444339516e564a78587539366974586c4f6132717937354c554b65384b7638585132596832417a5271314e4b5653766d4d50506d3554463762763961554e45695a51436e79504f6e7146617a755231373574455365305446624c636450424a43526a49384b32365967496a734c324e525574526e36714c533065694f536c6c37795a456945476d4a6e327262646942416c485046616e384e4d7869427571777355714e76383052675
37752726245696c734d68664b53793836684b39445a716b47546d4b59747176474c6b6a6d52513368796b367a356449706c64385541614236546e426a6b4f64766d33493972763941765a71776345686b734c594a7254446c796f46444b6d557441305a636b414e437245587a63487a30304c50564e4e73694d634d5a6f4f74414534424f53685879374e62545734487a555054774a7056686f6a7453666a664e696d354548345631374c61396862586659666332304e465a5678656a304b4d59586d586547634d67474c6d31794a4b546473474c755a697579625779503259726d6d5248544f6f704b575046556e3438415a48474168396d787136327230367248774e73493439693049794b3765314b4f74547265556c564b6e6d594a5959355a7476334b546f75375a6a676c755a557a39744b54747745583948314a37366e6c6d5a53345079555856696438336876596141617a394438711ee66b990b013609582733309b01df00*444f4656a5ec58e8a75204fb25fd5ae5", "PASSWORD"}, {NULL} }; static struct custom_salt { unsigned int nkeys; unsigned int iterations[2]; unsigned int saltlen[2]; unsigned char salt[2][SALTLEN]; unsigned int ctlen[2]; unsigned char ct[2][CTLEN]; } *cur_salt; static cl_int cl_error; static keychain_password *inbuffer; static keychain_hash *outbuffer; static keychain_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize, cracked_size; static struct fmt_main *self; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h"
#include "memdbg.h"

// Labels used by the autotune library when timing the three profiled
// events in crypt_all (host->device xfer, kernel, device->host xfer).
static const char * warn[] = {
	"xfer: ",  ", crypt: ",  ", xfer: "
};

/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

// Allocate host buffers and OpenCL device buffers for gws work items,
// and bind the device buffers to the kernel's arguments.
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(keychain_password) * gws;
	outsize = sizeof(keychain_hash) * gws;
	settingsize = sizeof(keychain_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}

// Release device buffers and host allocations created by create_clobj.
// `cracked` doubles as the "buffers exist" flag.
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

static void init(struct fmt_main *_self)
{
	self = _self;

	opencl_prepare_dev(gpu_id);
}

// Build the PBKDF2-SHA1 kernel (first call only) and run the autotuner.
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(keychain_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}

// Sanity-check a "$agilekeychain$..." ciphertext line:
// nkeys '*' iterations '*' saltlen '*' salthex '*' ctlen '*' cthex
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	int ctlen;
	int saltlen;
	char *p;

	if (strncmp(ciphertext, "$agilekeychain$", 15) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 15;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* nkeys */
		goto err;
	if (!isdec(p))
		goto err;
	if (atoi(p) > 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	if (!isdec(p))
		goto err;
	saltlen = atoi(p);
	if (saltlen > SALTLEN)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p) != saltlen * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ct length */
		goto err;
	if (!isdec(p))
		goto err;
	ctlen = atoi(p);
	if (ctlen > CTLEN)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ciphertext */
		goto err;
	if (hexlenl(p) != ctlen * 2)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

// Parse the first keyslot (salt + ciphertext) out of the hash line.
// Returns a pointer to a static struct (standard JtR salt convention).
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 15;	/* skip over "$agilekeychain$" */
	p = strtokm(ctcopy, "*");
	cs.nkeys = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations[0] = atoi(p);
	p = strtokm(NULL, "*");
	cs.saltlen[0] = atoi(p);
	p = strtokm(NULL, "*");
	// hex-decode the salt
	for (i = 0; i < cs.saltlen[0]; i++)
		cs.salt[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.ctlen[0] = atoi(p);
	p = strtokm(NULL, "*");
	// hex-decode the ciphertext blob
	for (i = 0; i < cs.ctlen[0]; i++)
		cs.ct[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

// Stage the salt parameters into the device-side settings buffer.
// outlen = 16: the kernel derives a 128-bit AES key.
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->saltlen[0]);
	currentsalt.length = cur_salt->saltlen[0];
	currentsalt.iterations = cur_salt->iterations[0];
	currentsalt.outlen = 16;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy salt to gpu");
}

#undef set_key
// Store a candidate password (truncated to PLAINTEXT_LENGTH) in the
// host-side input buffer for the GPU kernel.
static void set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}

// Cheap validity check: CBC-decrypt only the LAST 16-byte block of the
// keyslot (IV = next-to-last ciphertext block) and test its PKCS padding.
// Returns 0 if the derived key looks correct, -1 otherwise.
static int akcdecrypt(unsigned char *derived_key, unsigned char *data)
{
	unsigned char out[CTLEN];
	int n, key_size;
	AES_KEY akey;
	unsigned char iv[16];

	memcpy(iv, data + CTLEN - 32, 16);

	if (AES_set_decrypt_key(derived_key, 128, &akey) < 0)
		fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");

	AES_cbc_encrypt(data + CTLEN - 16, out + CTLEN - 16, 16, &akey, iv, AES_DECRYPT);

	// now check padding
	n = check_pkcs_pad(out, CTLEN, 16);
	if (n < 0)
		return -1;
	// n is presumably the unpadded length in bits here — the plaintext
	// must decode to a 128/192/256-bit key (TODO confirm against
	// check_pkcs_pad's contract)
	key_size = n / 8;
	if (key_size != 128 && key_size != 192 && key_size != 256)
		// "invalid key size"
		return -1;

	return 0;
}

// Upload candidates, run the PBKDF2 kernel, read derived keys back, then
// validate each key on the CPU (OpenMP-parallel) via akcdecrypt.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	    "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking read also synchronizes the queue)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
	    "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (!akcdecrypt((unsigned char*)outbuffer[index].v, cur_salt->ct[0])) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

// cracked[] is authoritative (padding check already done), so no further
// verification is needed here.
static int cmp_exact(char *source, int index)
{
	return 1;
}

// Reported as a tunable cost ("iteration count") in the format struct.
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->iterations[0];
}

struct fmt_main fmt_opencl_agilekeychain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{
			"iteration count",
		},
		keychain_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */

#endif /* HAVE_OPENCL */
GB_unop__abs_int16_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__abs_int16_int16
// op(A') function: GB_unop_tran__abs_int16_int16

// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = GB_IABS (aij)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = aij ; \
    Cx [pC] = GB_IABS (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise Cx [p] = |Ax [p]| over anz entries.  Each entry is independent,
// so the loop parallelizes with a static schedule and no reduction.  Aliasing
// Cx == Ax is safe because entry p reads only Ax [p] before writing Cx [p].
GrB_Info GB_unop_apply__abs_int16_int16
(
    int16_t *Cx,         // Cx and Ax may be aliased
    const int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out by GB_control.h: caller falls back to generic code
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        int16_t z = aij ;
        Cx [p] = GB_IABS (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above.
GrB_Info GB_unop_tran__abs_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
/* NOTE: mutates *y while performing the carry; callers here pass throwaway
 * stack copies, so this is harmless in this file. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates the double-buffered grid A[2][Nz][Ny][Nx] and
 * the coefficient grid roc2, then runs TESTS timed repetitions of the
 * time-tiled 25-point stencil generated by PLUTO/CLooG below. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when fewer than 4/5
   * command-line arguments are given — the allocations and loops below then
   * read indeterminate values.  TODO confirm the harness always passes them. */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;   /* +8 adds a 4-cell halo on each side (order-2, radius-4 stencil) */
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**));
  /* NOTE(review): the single-element malloc assigned to roc2 above is leaked
   * immediately by the reassignment below. */

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;     /* tile sizes per loop dimension; -1 terminates the list */
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 1024;
  tile_size[4] = -1;
  /* NOTE(review): tile_size is only consumed by the source-to-source tool and
   * is never freed at runtime. */

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  srand(42);
  /* NOTE(review): initialization starts at index 1, yet the stencil below can
   * read plane/row/column 0 (t6-4 etc. reach 0) — those cells hold
   * indeterminate values.  Presumably acceptable for a timing benchmark;
   * verify if results are ever checked numerically. */
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point (radius-4 per axis, plus center) finite-difference coefficients */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */
/* We do not support C11 <threads.h>. */
/* (the comment block above is preprocessor residue left by the tiling tool) */

  int t1, t2, t3, t4, t5, t6, t7, t8;
  int lb, ub, lbp, ubp, lb2, ub2;
  register int lbv, ubv;
/* Start of CLooG code */
/* Auto-generated time-tiled loop nest (do not hand-edit the bounds):
 * t1 iterates time wavefronts, t2..t4 iterate tiles (parallel over t2),
 * t5 is time inside a tile, and t6..t8 are the z/y/x points.  The grid is
 * double-buffered via the (t5 % 2) / ((t5 + 1) % 2) index on A. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=Nt-1;t1++) {
    lbp=ceild(t1+1,2);
    ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1-6,8),ceild(8*t2-Nz-19,32));t3<=min(floord(4*Nt+Ny-9,32),floord(4*t1+Ny-1,32));t3++) {
        for (t4=max(max(ceild(t1-254,256),ceild(8*t2-Nz-1011,1024)),ceild(32*t3-Ny-1011,1024));t4<=min(min(floord(4*Nt+Nx-9,1024),floord(4*t1+Nx-1,1024)),floord(32*t3+Nx+19,1024));t4++) {
          for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),8*t3+6),256*t4+254);t5++) {
            for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
                lbv=max(1024*t4,4*t5+4);
                ubv=min(1024*t4+1023,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* second-order wave update: A_next = 2*A - A_prev + roc2 * laplacian25(A) */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);

  return 0;
}
ToRORd_fkatp_endo.c
#include "ToRORd_fkatp_endo.h"
#include <stdlib.h>

/* Solver configuration copied from the ode_solver at init time.
 * NOTE(review): these are file-scope globals, so only one solver
 * configuration can be active per process at a time — verify that is the
 * intended usage of this model plugin. */
real max_step;
real min_step;
real abstol;
real reltol;
bool adpt;
/* Per-cell adaptive-stepper state (allocated only when adpt is true). */
real *ode_dt, *ode_previous_dt, *ode_time_new;

/* Entry point generated by the GET_CELL_MODEL_DATA macro: reports the resting
 * potential and the number of state equations of this model. */
GET_CELL_MODEL_DATA(init_cell_model_data) {

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;

    //for count and m
}

/* Entry point generated by SET_ODE_INITIAL_CONDITIONS_CPU: allocates the
 * state-variable array (NEQ reals per cell), captures the solver settings
 * into the globals above, and writes the ToRORd endo steady-state initial
 * conditions into every cell. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    log_to_stdout_and_file("Using ToRORd_fkatp_endo CPU model\n");

    uint32_t num_cells = solver->original_num_cells;
    solver->sv = (real*)malloc(NEQ*num_cells*sizeof(real));

    max_step = solver->max_dt;
    min_step = solver->min_dt;
    abstol = solver->abs_tol;
    reltol = solver->rel_tol;
    adpt = solver->adaptive;

    if(adpt) {
        ode_dt = (real*)malloc(num_cells*sizeof(real));

        OMP(parallel for)
        for(int i = 0; i < num_cells; i++) {
            ode_dt[i] = solver->min_dt;
        }

        ode_previous_dt = (real*)calloc(num_cells, sizeof(real));
        ode_time_new = (real*)calloc(num_cells, sizeof(real));
        log_to_stdout_and_file("Using Adaptive Euler model to solve the ODEs\n");
    } else {
        log_to_stdout_and_file("Using Euler model to solve the ODEs\n");
    }

    OMP(parallel for)
    for(uint32_t i = 0; i < num_cells; i++) {

        real *sv = &solver->sv[i * NEQ];

        /* ToRORd fkatp endocardial steady-state initial conditions */
        sv[0] = -8.876380e+01f; //v millivolt
        sv[1] = 1.110000e-02f; //CaMKt millimolar
        sv[2] = 1.210250e+01f; //nai millimolar
        sv[3] = 1.210290e+01f; //nass millimolar
        sv[4] = 1.423002e+02f; //ki millimolar
        sv[5] = 1.423002e+02f; //kss millimolar
        sv[6] = 8.158300e-05f; //cai millimolar
        sv[7] = 7.030500e-05f; //cass millimolar
        sv[8] = 1.521100e+00f; //cansr millimolar
        sv[9] = 1.521400e+00f; //cajsr millimolar
        sv[10] = 8.057200e-04f; //m dimensionless
        sv[11] = 8.286000e-01f; //h dimensionless
        sv[12] = 8.284000e-01f; //j dimensionless
        sv[13] = 6.707000e-01f; //hp dimensionless
        sv[14] = 8.281000e-01f; //jp dimensionless
        sv[15] = 1.629000e-04f; //mL dimensionless
        sv[16] = 5.255000e-01f; //hL dimensionless
        sv[17] = 2.872000e-01f; //hLp dimensionless
        sv[18] = 9.509800e-04f; //a dimensionless
        sv[19] = 9.996000e-01f; //iF dimensionless
        sv[20] = 5.936000e-01f; //iS dimensionless
        sv[21] = 4.845400e-04f; //ap dimensionless
        sv[22] = 9.996000e-01f; //iFp dimensionless
        sv[23] = 6.538000e-01f; //iSp dimensionless
        sv[24] = 8.108400e-09f; //d dimensionless
        sv[25] = 1.000000e+00f; //ff dimensionless
        sv[26] = 9.390000e-01f; //fs dimensionless
        sv[27] = 1.000000e+00f; //fcaf dimensionless
        sv[28] = 9.999000e-01f; //fcas dimensionless
        sv[29] = 1.000000e+00f; //jca dimensionless
        sv[30] = 1.000000e+00f; //ffp dimensionless
        sv[31] = 1.000000e+00f; //fcafp dimensionless
        sv[32] = 6.646200e-04f; //nca_ss dimensionless
        sv[33] = 1.200000e-03f; //nca_i dimensionless
        sv[34] = 9.981000e-01f; //C3 dimensionless
        sv[35] = 8.510900e-04f; //C2 dimensionless
        sv[36] = 7.034400e-04f; //C1 dimensionless
        sv[37] = 3.758500e-04f; //O dimensionless
        sv[38] = 1.328900e-05f; //I dimensionless
        sv[39] = 2.480000e-01f; //xs1 dimensionless
        sv[40] = 1.770700e-04f; //xs2 dimensionless
        sv[41] = 1.612900e-22f; //Jrel_np millimolar_per_millisecond
        sv[42] = 1.247500e-20f; //Jrel_p millimolar_per_millisecond
    }
}

/* Entry point generated by SOLVE_MODEL_ODES: advances every requested cell.
 * stim_currents and current_t come from the macro-generated parameter list.
 * Each cell is independent, hence the parallel for with sv_id private. */
SOLVE_MODEL_ODES(solve_model_odes_cpu) {

    uint32_t sv_id;

    size_t num_cells_to_solve = ode_solver->num_cells_to_solve;
    uint32_t * cells_to_solve = ode_solver->cells_to_solve;
    real *sv = ode_solver->sv;
    real dt = ode_solver->min_dt;
    uint32_t num_steps = ode_solver->num_steps;

    #pragma omp parallel for private(sv_id)
    for (u_int32_t i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];   // sparse subset of cells
        else
            sv_id = i;                   // all cells

        if(adpt) {
            solve_forward_euler_cpu_adpt(sv + (sv_id * NEQ), stim_currents[i], current_t + dt, sv_id);
        }
        else {
            // fixed-step: num_steps explicit Euler steps of size dt
            for (int j = 0; j < num_steps; ++j) {
                solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
            }
        }
    }
}

/* One explicit (forward) Euler step: sv += dt * f(sv). */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = dt*rDY[i] + rY[i];
}

/* Adaptive forward-Euler integrator for one cell, from the cell's stored
 * time up to final_time.  The step-size controller compares two derivative
 * evaluations (_k1__ at the step start, _k2__ at the tentative end point);
 * their difference estimates the local error, which is measured against a
 * mixed absolute/relative tolerance.  Per-cell dt/previous_dt/time_new live
 * in the global arrays indexed by sv_id.
 * NOTE(review): _k1__/_k2__ are heap-allocated on every call inside a hot
 * parallel loop — stack arrays of size NEQ would avoid the malloc/free. */
void solve_forward_euler_cpu_adpt(real *sv, real stim_curr, real final_time, int sv_id) {

    const real _beta_safety_ = 0.8;   // safety factor shrinking the predicted step
    int numEDO = NEQ;

    real rDY[numEDO];

    real _tolerances_[numEDO];
    real _aux_tol = 0.0;
    //initializes the variables
    ode_previous_dt[sv_id] = ode_dt[sv_id];

    real edos_old_aux_[numEDO];
    real edos_new_euler_[numEDO];

    real *_k1__ = (real*) malloc(sizeof(real)*numEDO);
    real *_k2__ = (real*) malloc(sizeof(real)*numEDO);
    real *_k_aux__;

    real *dt = &ode_dt[sv_id];
    real *time_new = &ode_time_new[sv_id];
    real *previous_dt = &ode_previous_dt[sv_id];

    if(*time_new + *dt > final_time) {
        *dt = final_time - *time_new;
    }

    RHS_cpu(sv, rDY, stim_curr, *dt);
    *time_new += *dt;

    for(int i = 0; i < numEDO; i++){
        _k1__[i] = rDY[i];
    }

    const double __tiny_ = pow(abstol, 2.0);  // avoids division by ~0 in the error ratio

    int count = 0;

    int count_limit = (final_time - *time_new)/min_step;

    int aux_count_limit = count_limit+2000000;

    if(aux_count_limit > 0) {
        count_limit = aux_count_limit;
    }

    while(1) {

        for(int i = 0; i < numEDO; i++) {
            //stores the old variables in a vector
            edos_old_aux_[i] = sv[i];
            //computes euler method
            edos_new_euler_[i] = _k1__[i] * *dt + edos_old_aux_[i];
            //steps ahead to compute the rk2 method
            sv[i] = edos_new_euler_[i];
        }

        *time_new += *dt;
        RHS_cpu(sv, rDY, stim_curr, *dt);
        *time_new -= *dt;//step back

        double greatestError = 0.0, auxError = 0.0;
        for(int i = 0; i < numEDO; i++) {
            //stores the new evaluation
            _k2__[i] = rDY[i];
            _aux_tol = fabs(edos_new_euler_[i])*reltol;
            _tolerances_[i] = (abstol > _aux_tol )?abstol:_aux_tol;
            //finds the greatest error between the steps
            auxError = fabs(( (*dt/2.0)*(_k1__[i] - _k2__[i])) / _tolerances_[i]);
            greatestError = (auxError > greatestError) ? auxError : greatestError;
        }
        ///adapt the time step
        greatestError += __tiny_;
        *previous_dt = *dt;
        ///adapt the time step
        *dt = _beta_safety_ * (*dt) * sqrt(1.0f/greatestError);

        if (*time_new + *dt > final_time) {
            *dt = final_time - *time_new;
        }

        //it doesn't accept the solution
        if ( count < count_limit && (greatestError >= 1.0f)) {
            //restore the old values to do it again
            for(int i = 0;  i < numEDO; i++) {
                sv[i] = edos_old_aux_[i];
            }
            count++;
            //throw the results away and compute again
        } else{//it accepts the solutions

            if(greatestError >=1.0) {
                printf("Accepting solution with error > %lf \n", greatestError);
            }

            //printf("%e %e\n", _ode->time_new, edos_new_euler_[0]);

            // clamp the next step into [min_step, max_step]
            if (*dt < min_step) {
                *dt = min_step;
            }

            else if (*dt > max_step && max_step != 0) {
                *dt = max_step;
            }

            if (*time_new + *dt > final_time) {
                *dt = final_time - *time_new;
            }

            // reuse _k2__ as the next step's _k1__ by swapping the buffers
            _k_aux__ = _k2__;
            _k2__ = _k1__;
            _k1__ = _k_aux__;

            //it steps the method ahead, with euler solution
            for(int i = 0; i < numEDO; i++){
                sv[i] = edos_new_euler_[i];
            }

            if(*time_new + *previous_dt >= final_time){
                if((fabs(final_time - *time_new) < 1.0e-5) ){
                    break;
                }else if(*time_new < final_time){
                    *dt = *previous_dt = final_time - *time_new;
                    *time_new += *previous_dt;
                    break;
                }else{
                    printf("Error: time_new %.20lf final_time %.20lf diff %e \n", *time_new , final_time, fabs(final_time - *time_new) );
                    break;
                }
            }else{
                *time_new += *previous_dt;
            }

        }
    }

    free(_k1__);
    free(_k2__);
}

/* Evaluates the ToRORd right-hand side: rDY_ = f(sv) given the stimulus.
 * The 43 state variables are unpacked by name and the actual current/flux
 * equations are pulled in from the shared include below. */
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    //State variables
    const real v_old_ = sv[0];
    const real CaMKt_old_ = sv[1];
    const real nai_old_ = sv[2];
    const real nass_old_ = sv[3];
    const real ki_old_ = sv[4];
    const real kss_old_ = sv[5];
    const real cai_old_ = sv[6];
    const real cass_old_ = sv[7];
    const real cansr_old_ = sv[8];
    const real cajsr_old_ = sv[9];
    const real m_old_ = sv[10];
    const real h_old_ = sv[11];
    const real j_old_ = sv[12];
    const real hp_old_ = sv[13];
    const real jp_old_ = sv[14];
    const real mL_old_ = sv[15];
    const real hL_old_ = sv[16];
    const real hLp_old_ = sv[17];
    const real a_old_ = sv[18];
    const real iF_old_ = sv[19];
    const real iS_old_ = sv[20];
    const real ap_old_ = sv[21];
    const real iFp_old_ = sv[22];
    const real iSp_old_ = sv[23];
    const real d_old_ = sv[24];
    const real ff_old_ = sv[25];
    const real fs_old_ = sv[26];
    const real fcaf_old_ = sv[27];
    const real fcas_old_ = sv[28];
    const real jca_old_ = sv[29];
    const real ffp_old_ = sv[30];
    const real fcafp_old_ = sv[31];
    const real nca_ss_old_ = sv[32];
    const real nca_i_old_ = sv[33];
    const real C3_old_ = sv[34];
    const real C2_old_ = sv[35];
    const real C1_old_ = sv[36];
    const real O_old_ = sv[37];
    const real I_old_ = sv[38];
    const real xs1_old_ = sv[39];
    const real xs2_old_ = sv[40];
    const real Jrel_np_old_ = sv[41];
    const real Jrel_p_old_ = sv[42];

    #include "ToROrd_common.inc.c"
}
main.c
#include "utils.h"
#include <omp.h>
#include <papi.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* PAPI hardware counters sampled around each multiplication. */
#define NUM_EVENTS 4
int EVENTS[NUM_EVENTS] = { PAPI_L2_TCM, PAPI_L3_TCM, PAPI_LD_INS, PAPI_SR_INS };
int EVENTSET = PAPI_NULL;
long long VALUES[NUM_EVENTS];

#define SIZE 4000
#define MAX_BLOCK_SIDE 12

/* Statically allocated operand and result matrices (shared by all kernels). */
float A[SIZE][SIZE];
float B[SIZE][SIZE];
float C[SIZE][SIZE];

// order IJK
/* Naive triple loop, c = a * b, worst spatial locality on b. */
void dotprod_ijk(
    size_t size, float a[][size], float b[][size], float c[][size]) {
    memset(c, 0, size * size * sizeof(float));

    for (size_t i = 0; i < size; i++) {
        for (size_t j = 0; j < size; j++) {
            for (size_t k = 0; k < size; k++) {
                c[i][j] += a[i][k] * b[k][j];
            }
        }
    }
}

/* IJK after transposing b in place, so both operands stream row-wise.
 * NOTE(review): b is left transposed on return — subsequent uses of the
 * same matrix see the transposed data.  TODO confirm the harness relies on
 * matrices_rand_ones producing content for which this is acceptable. */
void dotprod_ijk_transposed(
    size_t size, float a[][size], float b[][size], float c[][size]) {
    memset(c, 0, size * size * sizeof(float));

    transpose_matrix(size, b);

    for (size_t i = 0; i < size; i++) {
        for (size_t j = 0; j < size; j++) {
            for (size_t k = 0; k < size; k++) {
                c[i][j] += a[i][k] * b[j][k];
            }
        }
    }
}

// order IKJ
/* Loop-interchanged variant: innermost j streams both b and c row-wise. */
void dotprod_ikj(
    size_t size, float a[][size], float b[][size], float c[][size]) {
    memset(c, 0, size * size * sizeof(float));

    for (size_t i = 0; i < size; i++) {
        for (size_t k = 0; k < size; k++) {
            for (size_t j = 0; j < size; j++) {
                c[i][j] += a[i][k] * b[k][j];
            }
        }
    }
}

// order JKI
/* Column-major traversal; innermost i strides down columns (cache-hostile). */
void dotprod_jki(
    size_t size, float a[][size], float b[][size], float c[][size]) {
    memset(c, 0, size * size * sizeof(float));

    for (size_t j = 0; j < size; j++) {
        for (size_t k = 0; k < size; k++) {
            for (size_t i = 0; i < size; i++) {
                c[i][j] += a[i][k] * b[k][j];
            }
        }
    }
}

/* JKI with a, b pre-transposed and c computed transposed, then fixed up.
 * NOTE(review): like the other *_transposed variant, a and b are mutated in
 * place and not restored. */
void dotprod_jki_transposed(
    size_t size, float a[][size], float b[][size], float c[][size]) {
    memset(c, 0, size * size * sizeof(float));

    transpose_matrix(size, a);
    transpose_matrix(size, b);

    for (size_t j = 0; j < size; j++) {
        for (size_t k = 0; k < size; k++) {
            for (size_t i = 0; i < size; i++) {
                c[j][i] += a[k][i] * b[j][k];
            }
        }
    }

    transpose_matrix(size, c);
}

// block optimization
/* Tiled IJK over (bk, bj) blocks of side c_block_size, with b transposed.
 * NOTE(review): the tail is not handled when size is not a multiple of
 * c_block_size (SIZE=4000 with MAX_BLOCK_SIDE=12 gives blocks of 12, and
 * 4000 % 12 != 0, so the last block would index past the arrays).  This
 * function is only invoked from the commented-out calls in main — confirm
 * before re-enabling. */
void dotprod_ijk_block(
    size_t size, float a[][size], float b[][size], float c[][size]) {
    memset(c, 0, size * size * sizeof(float));
    size_t c_block_size
        = SIZE > MAX_BLOCK_SIDE ? SIZE / (SIZE / MAX_BLOCK_SIDE) : SIZE;

    transpose_matrix(size, b);

    for (size_t bk = 0; bk < size; bk += c_block_size) {
        for (size_t bj = 0; bj < size; bj += c_block_size) {
            for (size_t i = 0; i < size; i++) {
                for (size_t j = bj; j < bj + c_block_size; j++) {
                    for (size_t k = bk; k < bk + c_block_size; k++) {
                        c[i][j] += a[i][k] * b[j][k];
                    }
                }
            }
        }
    }
}

/* Tiled variant with a scalar accumulator so the k loop can vectorize
 * (restrict-qualified parameters promise no aliasing).  Same tail-block
 * caveat as dotprod_ijk_block. */
void dotprod_ijk_block_vec(
    size_t size,
    float a[__restrict__][size],
    float b[__restrict__][size],
    float c[__restrict__][size]) {
    memset(c, 0, size * size * sizeof(float));
    size_t c_block_size
        = SIZE > MAX_BLOCK_SIDE ? SIZE / (SIZE / MAX_BLOCK_SIDE) : SIZE;

    transpose_matrix(size, b);

    for (size_t bk = 0; bk < size; bk += c_block_size) {
        for (size_t bj = 0; bj < size; bj += c_block_size) {
            for (size_t i = 0; i < size; i++) {
                for (size_t j = bj; j < bj + c_block_size; j++) {
                    float tmp = 0;
                    for (size_t k = bk; k < bk + c_block_size; k++) {
                        tmp += a[i][k] * b[j][k];
                    }
                    c[i][j] += tmp;
                }
            }
        }
    }
}

/* Tiled + vectorized variant with the i loop parallelized by OpenMP.
 * Same tail-block caveat as dotprod_ijk_block. */
void dotprod_ijk_block_vec_openmp(
    size_t size,
    float a[__restrict__][size],
    float b[__restrict__][size],
    float c[__restrict__][size]) {
    memset(c, 0, size * size * sizeof(float));
    size_t c_block_size
        = SIZE > MAX_BLOCK_SIDE ? SIZE / (SIZE / MAX_BLOCK_SIDE) : SIZE;

    transpose_matrix(size, b);

    for (size_t bk = 0; bk < size; bk += c_block_size) {
        for (size_t bj = 0; bj < size; bj += c_block_size) {
#pragma omp parallel for
            for (size_t i = 0; i < size; i++) {
                for (size_t j = bj; j < bj + c_block_size; j++) {
                    float tmp = 0;
                    for (size_t k = bk; k < bk + c_block_size; k++) {
                        tmp += a[i][k] * b[j][k];
                    }
                    c[i][j] += tmp;
                }
            }
        }
    }
}

/* Runs kernel f twice: once under PAPI counters (A*B, checked row-wise),
 * once under the wall-clock timer (B*A, checked column-wise), then prints
 * the timing and the counter values. */
void run_with_PAPI(
    char* name,
    void f(size_t size, float m1[][size], float m2[][size], float r[][size])) {
    printf(">%s\n", name);

    PAPI_start(EVENTSET);
    f(SIZE, A, B, C);
    PAPI_stop(EVENTSET, VALUES);
    assert_result_lines(SIZE, C);
    clear_cache();

    start_timer();
    f(SIZE, B, A, C);
    long long unsigned time = stop_timer();
    assert_result_collums(SIZE, C);

    printf("TIME:%llu\n", time);

    char* event_name = (char*) malloc(sizeof(char) * PAPI_MAX_STR_LEN);
    for (size_t i = 0; i < NUM_EVENTS; i++) {
        PAPI_event_code_to_name(EVENTS[i], event_name);
        printf("EVENT %s\t%lld\n", event_name, VALUES[i]);
    }
    putchar('\n');

    free(event_name);
    PAPI_reset(EVENTSET);
    clear_cache();
}

int main(void) {
    // Initialize PAPI
    PAPI_library_init(PAPI_VER_CURRENT);
    PAPI_create_eventset(&EVENTSET);
    PAPI_add_events(EVENTSET, EVENTS, NUM_EVENTS);

    // Initialize matrices
    matrices_rand_ones(SIZE, A, B);

    // IJK
    run_with_PAPI("ijk", dotprod_ijk);
    run_with_PAPI("ijk transposed", dotprod_ijk_transposed);

    // IKJ
    /* run_with_PAPI("ikj", dotprod_ikj); */

    // JKI
    run_with_PAPI("jki", dotprod_jki);
    run_with_PAPI("jki transposed", dotprod_jki_transposed);

    // BLOCK
    /* run_with_PAPI("ijk block", dotprod_ijk_block); */
    /* run_with_PAPI("ijk block & vec", dotprod_ijk_block_vec); */
    /* run_with_PAPI("ijk block & vec & OpenMP", dotprod_ijk_block_vec_openmp); */

    return 0;
}
additional_algorithms.h
#ifndef additional_algorithms__h__ #define additional_algorithms__h__ //#include <boost/sort/spreadsort/integer_sort.hpp> //boost::sort::spreadsort::integer_sort(&job.in[job.normal_sorting_begin],&job.in[job.normal_sorting_end]); #include <aRibeiroCore/aRibeiroCore.h> using namespace aRibeiro; // // Cormen Recursive Merge Sort Implementation // static void Cormen_MergeStep(uint32_t *A, int p, int q, int r, uint32_t* pre_alloc_tmp) { int n1 = q-p+1;//p->q int n2 = r-q;//q->r //uint32_t* L = (uint32_t*)malloc_aligned( (n1+1)*sizeof(uint32_t) ); //uint32_t* R = (uint32_t*)malloc_aligned( (n2+1)*sizeof(uint32_t) ); uint32_t* L = pre_alloc_tmp; uint32_t* R = pre_alloc_tmp + n1 + 1; //for(int i=0;i<n1;i++) L[i] = A[p+i]; //for(int j=0;j<n2;j++) R[i] = A[q+j+1]; memcpy(L,A+p,n1*sizeof(uint32_t)); memcpy(R,A+q+1,n2*sizeof(uint32_t)); L[n1] = UINT32_MAX; R[n2] = UINT32_MAX; int i=0; int j=0; for(int k=p;k<=r;k++){ if ( L[i]<=R[j] ){ A[k] = L[i]; i++; } else { A[k] = R[j]; j++; } } //free_aligned(L); //free_aligned(R); } static void Cormen_MergeRecursiveStep(uint32_t*_array, int p, int r, uint32_t* pre_alloc_tmp) { if (p<r){ int q = (p+r) >> 1; Cormen_MergeRecursiveStep(_array, p, q, pre_alloc_tmp); Cormen_MergeRecursiveStep(_array, q+1, r, pre_alloc_tmp); Cormen_MergeStep(_array, p, q, r, pre_alloc_tmp); } } static void Cormen_Recursive_MergeSort(uint32_t*_array, int size, uint32_t* pre_alloc_tmp) { int p = 0; int r = size-1; Cormen_MergeRecursiveStep(_array, p, r, pre_alloc_tmp); } // // Non-Recursive Merge Sort Implementation // template<typename T> static void NonRecursive_MergeSort(T*_array, int size, T*pre_alloc_tmp = NULL) { T *_aux; if (pre_alloc_tmp == NULL) _aux = (T *)malloc_aligned( sizeof(T)*size ); else _aux = pre_alloc_tmp; T *in = _array; T *out = _aux; int element_count = 1; while (element_count < size){ //#pragma omp parallel for for(int i=0;i<size;i+=element_count<<1){ int write_index = i; int write_max = i + (element_count<<1); if (write_max > size) 
write_max = size; int a_index = i; int b_index = i+element_count; int a_max = b_index; int b_max = b_index + element_count; if (a_max > size) a_max = size; if (b_max > size) b_max = size; while (write_index < write_max && a_index < a_max && b_index < b_max) { const T& _a = in[a_index]; const T& _b = in[b_index]; if (_a > _b){ out[write_index] = _b; b_index++; } else { out[write_index] = _a; a_index++; } write_index++; } while (a_index<a_max) { out[write_index++] = in[a_index++]; } while (b_index<b_max) { out[write_index++] = in[b_index++]; } } //swap in/out T* aux = in; in = out; out = aux; element_count = element_count << 1; } if (in != _array) memcpy(_array, in, sizeof(T)*size); if (pre_alloc_tmp == NULL) free_aligned(_aux); } template<typename T> static void NonRecursive_MergeSort2(T* _array, int size, T* pre_alloc_tmp = NULL) { T* _aux; if (pre_alloc_tmp == NULL) _aux = (T*)malloc_aligned(sizeof(T) * size); else _aux = pre_alloc_tmp; int element_count = 1; while (element_count < size) { //#pragma omp parallel for for (int i = 0; i < size; i += element_count << 1) { int write_index = i; int write_max = i + (element_count << 1); if (write_max > size) write_max = size; int a_index = i; int b_index = i + element_count; int a_max = b_index; int b_max = b_index + element_count; if (a_max > size) a_max = size; if (b_max > size) b_max = size; T* arr_a = _aux; T* arr_b = _aux + (a_max - a_index); if ((a_max - a_index) > 0) memcpy(arr_a, _array + a_index, (a_max - a_index) * sizeof(T)); if ((b_max - b_index)>0) memcpy(arr_b, _array + b_index, (b_max - b_index) * sizeof(T)); a_max = (a_max - a_index); b_max = (b_max - b_index); a_index = 0; b_index = 0; while (write_index < write_max && a_index < a_max && b_index < b_max) { const T& _a = arr_a[a_index]; const T& _b = arr_b[b_index]; if (_a > _b) { _array[write_index] = _b; b_index++; } else { _array[write_index] = _a; a_index++; } write_index++; } while (a_index < a_max) { _array[write_index++] = arr_a[a_index++]; } while 
(b_index < b_max) { _array[write_index++] = arr_b[b_index++]; } } element_count = element_count << 1; } if (pre_alloc_tmp == NULL) free_aligned(_aux); } #endif
UMESimdCastOperators.h
// The MIT License (MIT) // // Copyright (c) 2015-2017 CERN // // Author: Przemyslaw Karpinski // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // // // This piece of code was developed as part of ICE-DIP project at CERN. // "ICE-DIP is a European Industrial Doctorate project funded by the European Community's // 7th Framework programme Marie Curie Actions under grant PITN-GA-2012-316596". // #ifndef UME_SIMD_CAST_OPERATORS_H_ #define UME_SIMD_CAST_OPERATORS_H_ #include "UMESimdVecUint.h" #include "UMESimdVecInt.h" #include "UMESimdVecFloat.h" namespace UME { namespace SIMD { // Operators for non-specialized types require 'template<>' syntax. // Compliant compiler will not accept this syntax for non-specialized // types, so make sure only proper definitions have it. 
// UTOI template<> inline SIMDVec_u<uint8_t, 1>::operator SIMDVec_i<int8_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 1>, int8_t, SIMDVec_u<uint8_t, 1>>(*this); } template<> inline SIMDVec_u<uint8_t, 2>::operator SIMDVec_i<int8_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 2>, int8_t, SIMDVec_u<uint8_t, 2>>(*this); } template<> inline SIMDVec_u<uint8_t, 4>::operator SIMDVec_i<int8_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 4>, int8_t, SIMDVec_u<uint8_t, 4>>(*this); } template<> inline SIMDVec_u<uint8_t, 8>::operator SIMDVec_i<int8_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 8>, int8_t, SIMDVec_u<uint8_t, 8>>(*this); } template<> inline SIMDVec_u<uint8_t, 16>::operator SIMDVec_i<int8_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 16>, int8_t, SIMDVec_u<uint8_t, 16>>(*this); } template<> inline SIMDVec_u<uint8_t, 32>::operator SIMDVec_i<int8_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 32>, int8_t, SIMDVec_u<uint8_t, 32>>(*this); } template<> inline SIMDVec_u<uint8_t, 64>::operator SIMDVec_i<int8_t, 64>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 64>, int8_t, SIMDVec_u<uint8_t, 64>>(*this); } template<> inline SIMDVec_u<uint8_t, 128>::operator SIMDVec_i<int8_t, 128>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 128>, int8_t, SIMDVec_u<uint8_t, 128>>(*this); } template<> inline SIMDVec_u<uint16_t, 1>::operator SIMDVec_i<int16_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 1>, int16_t, SIMDVec_u<uint16_t, 1>>(*this); } template<> inline SIMDVec_u<uint16_t, 2>::operator SIMDVec_i<int16_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 2>, int16_t, SIMDVec_u<uint16_t, 2>>(*this); } template<> inline SIMDVec_u<uint16_t, 4>::operator SIMDVec_i<int16_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 4>, int16_t, SIMDVec_u<uint16_t, 4>>(*this); } template<> inline 
SIMDVec_u<uint16_t, 8>::operator SIMDVec_i<int16_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 8>, int16_t, SIMDVec_u<uint16_t, 8>>(*this); } template<> inline SIMDVec_u<uint16_t, 16>::operator SIMDVec_i<int16_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 16>, int16_t, SIMDVec_u<uint16_t, 16>>(*this); } template<> inline SIMDVec_u<uint16_t, 32>::operator SIMDVec_i<int16_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 32>, int16_t, SIMDVec_u<uint16_t, 32>>(*this); } template<> inline SIMDVec_u<uint16_t, 64>::operator SIMDVec_i<int16_t, 64>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 64>, int16_t, SIMDVec_u<uint16_t, 64>>(*this); } template<> inline SIMDVec_u<uint32_t, 1>::operator SIMDVec_i<int32_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 1>, int32_t, SIMDVec_u<uint32_t, 1>>(*this); } template<> inline SIMDVec_u<uint32_t, 2>::operator SIMDVec_i<int32_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 2>, int32_t, SIMDVec_u<uint32_t, 2>>(*this); } template<> inline SIMDVec_u<uint32_t, 4>::operator SIMDVec_i<int32_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 4>, int32_t, SIMDVec_u<uint32_t, 4>>(*this); } template<> inline SIMDVec_u<uint32_t, 8>::operator SIMDVec_i<int32_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 8>, int32_t, SIMDVec_u<uint32_t, 8>>(*this); } template<> inline SIMDVec_u<uint32_t, 16>::operator SIMDVec_i<int32_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 16>, int32_t, SIMDVec_u<uint32_t, 16>>(*this); } template<> inline SIMDVec_u<uint32_t, 32>::operator SIMDVec_i<int32_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 32>, int32_t, SIMDVec_u<uint32_t, 32>>(*this); } template<> inline SIMDVec_u<uint64_t, 1>::operator SIMDVec_i<int64_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 1>, int64_t, SIMDVec_u<uint64_t, 1>>(*this); } template<> inline 
SIMDVec_u<uint64_t, 2>::operator SIMDVec_i<int64_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 2>, int64_t, SIMDVec_u<uint64_t, 2>>(*this); } template<> inline SIMDVec_u<uint64_t, 4>::operator SIMDVec_i<int64_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 4>, int64_t, SIMDVec_u<uint64_t, 4>>(*this); } template<> inline SIMDVec_u<uint64_t, 8>::operator SIMDVec_i<int64_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 8>, int64_t, SIMDVec_u<uint64_t, 8>>(*this); } template<> inline SIMDVec_u<uint64_t, 16>::operator SIMDVec_i<int64_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 16>, int64_t, SIMDVec_u<uint64_t, 16>>(*this); } // UTOF template<> inline SIMDVec_u<uint32_t, 1>::operator SIMDVec_f<float, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 1>, float, SIMDVec_u<uint32_t, 1>>(*this); } template<> inline SIMDVec_u<uint32_t, 2>::operator SIMDVec_f<float, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 2>, float, SIMDVec_u<uint32_t, 2>>(*this); } template<> inline SIMDVec_u<uint32_t, 4>::operator SIMDVec_f<float, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 4>, float, SIMDVec_u<uint32_t, 4>>(*this); } template<> inline SIMDVec_u<uint32_t, 8>::operator SIMDVec_f<float, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 8>, float, SIMDVec_u<uint32_t, 8>>(*this); } template<> inline SIMDVec_u<uint32_t, 16>::operator SIMDVec_f<float, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 16>, float, SIMDVec_u<uint32_t, 16>>(*this); } template<> inline SIMDVec_u<uint32_t, 32>::operator SIMDVec_f<float, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 32>, float, SIMDVec_u<uint32_t, 32>>(*this); } template<> inline SIMDVec_u<uint64_t, 1>::operator SIMDVec_f<double, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 1>, double, SIMDVec_u<uint64_t, 1>>(*this); } template<> inline SIMDVec_u<uint64_t, 2>::operator 
SIMDVec_f<double, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 2>, double, SIMDVec_u<uint64_t, 2>>(*this); } template<> inline SIMDVec_u<uint64_t, 4>::operator SIMDVec_f<double, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 4>, double, SIMDVec_u<uint64_t, 4>>(*this); } template<> inline SIMDVec_u<uint64_t, 8>::operator SIMDVec_f<double, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 8>, double, SIMDVec_u<uint64_t, 8>>(*this); } template<> inline SIMDVec_u<uint64_t, 16>::operator SIMDVec_f<double, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 16>, double, SIMDVec_u<uint64_t, 16>>(*this); } // ITOU template<> inline SIMDVec_i<int8_t, 1>::operator SIMDVec_u<uint8_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 1>, uint8_t, SIMDVec_i<int8_t, 1>>(*this); } template<> inline SIMDVec_i<int8_t, 2>::operator SIMDVec_u<uint8_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 2>, uint8_t, SIMDVec_i<int8_t, 2>>(*this); } template<> inline SIMDVec_i<int8_t, 4>::operator SIMDVec_u<uint8_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 4>, uint8_t, SIMDVec_i<int8_t, 4>>(*this); } template<> inline SIMDVec_i<int8_t, 8>::operator SIMDVec_u<uint8_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 8>, uint8_t, SIMDVec_i<int8_t, 8>>(*this); } template<> inline SIMDVec_i<int8_t, 16>::operator SIMDVec_u<uint8_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 16>, uint8_t, SIMDVec_i<int8_t, 16>>(*this); } template<> inline SIMDVec_i<int8_t, 32>::operator SIMDVec_u<uint8_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 32>, uint8_t, SIMDVec_i<int8_t, 32>>(*this); } template<> inline SIMDVec_i<int8_t, 64>::operator SIMDVec_u<uint8_t, 64>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 64>, uint8_t, SIMDVec_i<int8_t, 64>>(*this); } template<> inline SIMDVec_i<int8_t, 128>::operator SIMDVec_u<uint8_t, 128>() const { 
return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 128>, uint8_t, SIMDVec_i<int8_t, 128>>(*this); } template<> inline SIMDVec_i<int16_t, 1>::operator SIMDVec_u<uint16_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 1>, uint16_t, SIMDVec_i<int16_t, 1>>(*this); } template<> inline SIMDVec_i<int16_t, 2>::operator SIMDVec_u<uint16_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 2>, uint16_t, SIMDVec_i<int16_t, 2>>(*this); } template<> inline SIMDVec_i<int16_t, 4>::operator SIMDVec_u<uint16_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 4>, uint16_t, SIMDVec_i<int16_t, 4>>(*this); } template<> inline SIMDVec_i<int16_t, 8>::operator SIMDVec_u<uint16_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 8>, uint16_t, SIMDVec_i<int16_t, 8>>(*this); } template<> inline SIMDVec_i<int16_t, 16>::operator SIMDVec_u<uint16_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 16>, uint16_t, SIMDVec_i<int16_t, 16>>(*this); } template<> inline SIMDVec_i<int16_t, 32>::operator SIMDVec_u<uint16_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 32>, uint32_t, SIMDVec_i<int16_t, 32>>(*this); } template<> inline SIMDVec_i<int16_t, 64>::operator SIMDVec_u<uint16_t, 64>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 64>, uint32_t, SIMDVec_i<int16_t, 64>>(*this); } template<> inline SIMDVec_i<int32_t, 1>::operator SIMDVec_u<uint32_t, 1>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 1>, uint32_t, SIMDVec_i<int32_t, 1>>(*this); } template<> inline SIMDVec_i<int32_t, 2>::operator SIMDVec_u<uint32_t, 2>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 2>, uint32_t, SIMDVec_i<int32_t, 2>>(*this); } template<> inline SIMDVec_i<int32_t, 4>::operator SIMDVec_u<uint32_t, 4>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 4>, uint32_t, SIMDVec_i<int32_t, 4>>(*this); } template<> inline SIMDVec_i<int32_t, 8>::operator SIMDVec_u<uint32_t, 8>() 
const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 8>, uint32_t, SIMDVec_i<int32_t, 8>>(*this); } template<> inline SIMDVec_i<int32_t, 16>::operator SIMDVec_u<uint32_t, 16>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 16>, uint32_t, SIMDVec_i<int32_t, 16>>(*this); } template<> inline SIMDVec_i<int32_t, 32>::operator SIMDVec_u<uint32_t, 32>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 32>, uint32_t, SIMDVec_i<int32_t, 32>>(*this); } template<> inline SIMDVec_i<int64_t, 1>::operator SIMDVec_u<uint64_t, 1>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 1>, uint64_t, SIMDVec_i<int64_t, 1>>(*this); } template<> inline SIMDVec_i<int64_t, 2>::operator SIMDVec_u<uint64_t, 2>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 2>, uint64_t, SIMDVec_i<int64_t, 2>>(*this); } template<> inline SIMDVec_i<int64_t, 4>::operator SIMDVec_u<uint64_t, 4>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 4>, uint64_t, SIMDVec_i<int64_t, 4>>(*this); } template<> inline SIMDVec_i<int64_t, 8>::operator SIMDVec_u<uint64_t, 8>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 8>, uint64_t, SIMDVec_i<int64_t, 8>>(*this); } template<> inline SIMDVec_i<int64_t, 16>::operator SIMDVec_u<uint64_t, 16>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 16>, uint64_t, SIMDVec_i<int64_t, 16>>(*this); } // ITOF template<> inline SIMDVec_i<int32_t, 1>::operator SIMDVec_f<float, 1>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<float, 1>, float, SIMDVec_i<int32_t, 1>>(*this); } template<> inline SIMDVec_i<int32_t, 2>::operator SIMDVec_f<float, 2>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<float, 2>, float, SIMDVec_i<int32_t, 2>>(*this); } template<> inline SIMDVec_i<int32_t, 4>::operator SIMDVec_f<float, 4>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<float, 4>, float, SIMDVec_i<int32_t, 4>>(*this); } template<> inline SIMDVec_i<int32_t, 8>::operator SIMDVec_f<float, 8>() const { 
return SCALAR_EMULATION::xtoy < SIMDVec_f<float, 8>, float, SIMDVec_i<int32_t, 8>>(*this); } template<> inline SIMDVec_i<int32_t, 16>::operator SIMDVec_f<float, 16>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<float, 16>, float, SIMDVec_i<int32_t, 16>>(*this); } template<> inline SIMDVec_i<int32_t, 32>::operator SIMDVec_f<float, 32>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<float, 32>, float, SIMDVec_i<int32_t, 32>>(*this); } template<> inline SIMDVec_i<int64_t, 1>::operator SIMDVec_f<double, 1>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<double, 1>, double, SIMDVec_i<int64_t, 1>>(*this); } template<> inline SIMDVec_i<int64_t, 2>::operator SIMDVec_f<double, 2>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<double, 2>, double, SIMDVec_i<int64_t, 2>>(*this); } template<> inline SIMDVec_i<int64_t, 4>::operator SIMDVec_f<double, 4>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<double, 4>, double, SIMDVec_i<int64_t, 4>>(*this); } template<> inline SIMDVec_i<int64_t, 8>::operator SIMDVec_f<double, 8>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<double, 8>, double, SIMDVec_i<int64_t, 8>>(*this); } template<> inline SIMDVec_i<int64_t, 16>::operator SIMDVec_f<double, 16>() const { return SCALAR_EMULATION::xtoy < SIMDVec_f<double, 16>, double, SIMDVec_i<int64_t, 16>>(*this); } // FTOU template<> inline SIMDVec_f<float, 1>::operator SIMDVec_u<uint32_t, 1>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 1>, uint32_t, SIMDVec_f<float, 1>>(*this); } template<> inline SIMDVec_f<float, 2>::operator SIMDVec_u<uint32_t, 2>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 2>, uint32_t, SIMDVec_f<float, 2>>(*this); } template<> inline SIMDVec_f<float, 4>::operator SIMDVec_u<uint32_t, 4>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 4>, uint32_t, SIMDVec_f<float, 4>>(*this); } template<> inline SIMDVec_f<float, 8>::operator SIMDVec_u<uint32_t, 8>() const { return SCALAR_EMULATION::xtoy < 
SIMDVec_u<uint32_t, 8>, uint32_t, SIMDVec_f<float, 8>>(*this); } template<> inline SIMDVec_f<float, 16>::operator SIMDVec_u<uint32_t, 16>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 16>, uint32_t, SIMDVec_f<float, 16>>(*this); } template<> inline SIMDVec_f<float, 32>::operator SIMDVec_u<uint32_t, 32>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint32_t, 32>, uint32_t, SIMDVec_f<float, 32>>(*this); } template<> inline SIMDVec_f<double, 1>::operator SIMDVec_u<uint64_t, 1>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 1>, uint64_t, SIMDVec_f<double, 1>>(*this); } template<> inline SIMDVec_f<double, 2>::operator SIMDVec_u<uint64_t, 2>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 2>, uint64_t, SIMDVec_f<double, 2>>(*this); } template<> inline SIMDVec_f<double, 4>::operator SIMDVec_u<uint64_t, 4>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 4>, uint64_t, SIMDVec_f<double, 4>>(*this); } template<> inline SIMDVec_f<double, 8>::operator SIMDVec_u<uint64_t, 8>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 8>, uint64_t, SIMDVec_f<double, 8>>(*this); } template<> inline SIMDVec_f<double, 16>::operator SIMDVec_u<uint64_t, 16>() const { return SCALAR_EMULATION::xtoy < SIMDVec_u<uint64_t, 16>, uint64_t, SIMDVec_f<double, 16>>(*this); } // FTOI template<> inline SIMDVec_f<float, 1>::operator SIMDVec_i<int32_t, 1>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int32_t, 1>, int32_t, SIMDVec_f<float, 1>>(*this); } template<> inline SIMDVec_f<float, 2>::operator SIMDVec_i<int32_t, 2>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int32_t, 2>, int32_t, SIMDVec_f<float, 2>>(*this); } template<> inline SIMDVec_f<float, 4>::operator SIMDVec_i<int32_t, 4>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int32_t, 4>, int32_t, SIMDVec_f<float, 4>>(*this); } template<> inline SIMDVec_f<float, 8>::operator SIMDVec_i<int32_t, 8>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int32_t, 
8>, int32_t, SIMDVec_f<float, 8>>(*this); /* SIMDVec_i<int32_t, 8> retval; int32_t * local_retval_ptr = &retval.mVec[0]; float const * local_ptr = &mVec[0]; #pragma omp simd aligned(local_ptr:32, local_retval_ptr:32) simdlen(8) safelen(8) for(int i = 0; i < 8; i++) { local_retval_ptr[i] = int32_t(local_ptr[i]); } return retval;*/ } template<> inline SIMDVec_f<float, 16>::operator SIMDVec_i<int32_t, 16>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int32_t, 16>, int32_t, SIMDVec_f<float, 16>>(*this); } template<> inline SIMDVec_f<float, 32>::operator SIMDVec_i<int32_t, 32>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int32_t, 32>, int32_t, SIMDVec_f<float, 32>>(*this); } template<> inline SIMDVec_f<double, 1>::operator SIMDVec_i<int64_t, 1>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int64_t, 1>, int64_t, SIMDVec_f<double, 1>>(*this); } template<> inline SIMDVec_f<double, 2>::operator SIMDVec_i<int64_t, 2>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int64_t, 2>, int64_t, SIMDVec_f<double, 2>>(*this); } template<> inline SIMDVec_f<double, 4>::operator SIMDVec_i<int64_t, 4>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int64_t, 4>, int64_t, SIMDVec_f<double, 4>>(*this); } template<> inline SIMDVec_f<double, 8>::operator SIMDVec_i<int64_t, 8>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int64_t, 8>, int64_t, SIMDVec_f<double, 8>>(*this); } template<> inline SIMDVec_f<double, 16>::operator SIMDVec_i<int64_t, 16>() const { return SCALAR_EMULATION::xtoy < SIMDVec_i<int64_t, 16>, int64_t, SIMDVec_f<double, 16>>(*this); } // PROMOTE template<> inline SIMDVec_u<uint8_t, 1>::operator SIMDVec_u<uint16_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 1>, uint16_t, SIMDVec_u<uint8_t, 1>>(*this); } template<> inline SIMDVec_u<uint8_t, 2>::operator SIMDVec_u<uint16_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 2>, uint16_t, SIMDVec_u<uint8_t, 2>>(*this); } template<> inline SIMDVec_u<uint8_t, 
4>::operator SIMDVec_u<uint16_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 4>, uint16_t, SIMDVec_u<uint8_t, 4>>(*this); } template<> inline SIMDVec_u<uint8_t, 8>::operator SIMDVec_u<uint16_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 8>, uint16_t, SIMDVec_u<uint8_t, 8>>(*this); } template<> inline SIMDVec_u<uint8_t, 16>::operator SIMDVec_u<uint16_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 16>, uint16_t, SIMDVec_u<uint8_t, 16>>(*this); } template<> inline SIMDVec_u<uint8_t, 32>::operator SIMDVec_u<uint16_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 32>, uint16_t, SIMDVec_u<uint8_t, 32>>(*this); } template<> inline SIMDVec_u<uint8_t, 64>::operator SIMDVec_u<uint16_t, 64>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 64>, uint16_t, SIMDVec_u<uint8_t, 64>>(*this); } template<> inline SIMDVec_u<uint16_t, 1>::operator SIMDVec_u<uint32_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 1>, uint32_t, SIMDVec_u<uint16_t, 1>>(*this); } template<> inline SIMDVec_u<uint16_t, 2>::operator SIMDVec_u<uint32_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 2>, uint32_t, SIMDVec_u<uint16_t, 2>>(*this); } template<> inline SIMDVec_u<uint16_t, 4>::operator SIMDVec_u<uint32_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 4>, uint32_t, SIMDVec_u<uint16_t, 4>>(*this); } template<> inline SIMDVec_u<uint16_t, 8>::operator SIMDVec_u<uint32_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 8>, uint32_t, SIMDVec_u<uint16_t, 8>>(*this); } template<> inline SIMDVec_u<uint16_t, 16>::operator SIMDVec_u<uint32_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 16>, uint32_t, SIMDVec_u<uint16_t, 16>>(*this); } template<> inline SIMDVec_u<uint16_t, 32>::operator SIMDVec_u<uint32_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 32>, uint32_t, SIMDVec_u<uint16_t, 32>>(*this); } template<> 
inline SIMDVec_u<uint32_t, 1>::operator SIMDVec_u<uint64_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint64_t, 1>, uint64_t, SIMDVec_u<uint32_t, 1>>(*this); } template<> inline SIMDVec_u<uint32_t, 2>::operator SIMDVec_u<uint64_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint64_t, 2>, uint64_t, SIMDVec_u<uint32_t, 2>>(*this); } template<> inline SIMDVec_u<uint32_t, 4>::operator SIMDVec_u<uint64_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint64_t, 4>, uint64_t, SIMDVec_u<uint32_t, 4>>(*this); } template<> inline SIMDVec_u<uint32_t, 8>::operator SIMDVec_u<uint64_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint64_t, 8>, uint64_t, SIMDVec_u<uint32_t, 8>>(*this); } template<> inline SIMDVec_u<uint32_t, 16>::operator SIMDVec_u<uint64_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint64_t, 16>, uint64_t, SIMDVec_u<uint32_t, 16>>(*this); } template<> inline SIMDVec_i<int8_t, 1>::operator SIMDVec_i<int16_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 1>, int16_t, SIMDVec_i<int8_t, 1>>(*this); } template<> inline SIMDVec_i<int8_t, 2>::operator SIMDVec_i<int16_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 2>, int16_t, SIMDVec_i<int8_t, 2>>(*this); } template<> inline SIMDVec_i<int8_t, 4>::operator SIMDVec_i<int16_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 4>, int16_t, SIMDVec_i<int8_t, 4>>(*this); } template<> inline SIMDVec_i<int8_t, 8>::operator SIMDVec_i<int16_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 8>, int16_t, SIMDVec_i<int8_t, 8>>(*this); } template<> inline SIMDVec_i<int8_t, 16>::operator SIMDVec_i<int16_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 16>, int16_t, SIMDVec_i<int8_t, 16>>(*this); } template<> inline SIMDVec_i<int8_t, 32>::operator SIMDVec_i<int16_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 32>, int16_t, SIMDVec_i<int8_t, 32>>(*this); } template<> inline 
SIMDVec_i<int8_t, 64>::operator SIMDVec_i<int16_t, 64>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 64>, int16_t, SIMDVec_i<int8_t, 64>>(*this); } template<> inline SIMDVec_i<int16_t, 1>::operator SIMDVec_i<int32_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 1>, int32_t, SIMDVec_i<int16_t, 1>>(*this); } template<> inline SIMDVec_i<int16_t, 2>::operator SIMDVec_i<int32_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 2>, int32_t, SIMDVec_i<int16_t, 2>>(*this); } template<> inline SIMDVec_i<int16_t, 4>::operator SIMDVec_i<int32_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 4>, int32_t, SIMDVec_i<int16_t, 4>>(*this); } template<> inline SIMDVec_i<int16_t, 8>::operator SIMDVec_i<int32_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 8>, int32_t, SIMDVec_i<int16_t, 8>>(*this); } template<> inline SIMDVec_i<int16_t, 16>::operator SIMDVec_i<int32_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 16>, int32_t, SIMDVec_i<int16_t, 16>>(*this); } template<> inline SIMDVec_i<int16_t, 32>::operator SIMDVec_i<int32_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 32>, int32_t, SIMDVec_i<int16_t, 32>>(*this); } template<> inline SIMDVec_i<int32_t, 1>::operator SIMDVec_i<int64_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 1>, int64_t, SIMDVec_i<int32_t, 1>>(*this); } template<> inline SIMDVec_i<int32_t, 2>::operator SIMDVec_i<int64_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 2>, int64_t, SIMDVec_i<int32_t, 2>>(*this); } template<> inline SIMDVec_i<int32_t, 4>::operator SIMDVec_i<int64_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 4>, int64_t, SIMDVec_i<int32_t, 4>>(*this); } template<> inline SIMDVec_i<int32_t, 8>::operator SIMDVec_i<int64_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 8>, int64_t, SIMDVec_i<int32_t, 8>>(*this); } template<> inline SIMDVec_i<int32_t, 
16>::operator SIMDVec_i<int64_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int64_t, 16>, int64_t, SIMDVec_i<int32_t, 16>>(*this); } template<> inline SIMDVec_f<float, 1>::operator SIMDVec_f<double, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 1>, double, SIMDVec_f<float, 1>>(*this); } template<> inline SIMDVec_f<float, 2>::operator SIMDVec_f<double, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 2>, double, SIMDVec_f<float, 2>>(*this); } template<> inline SIMDVec_f<float, 4>::operator SIMDVec_f<double, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 4>, double, SIMDVec_f<float, 4>>(*this); } template<> inline SIMDVec_f<float, 8>::operator SIMDVec_f<double, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 8>, double, SIMDVec_f<float, 8>>(*this); } template<> inline SIMDVec_f<float, 16>::operator SIMDVec_f<double, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<double, 16>, double, SIMDVec_f<float, 16>>(*this); } // DEGRADE template<> inline SIMDVec_u<uint16_t, 1>::operator SIMDVec_u<uint8_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 1>, uint8_t, SIMDVec_u<uint16_t, 1>>(*this); } template<> inline SIMDVec_u<uint16_t, 2>::operator SIMDVec_u<uint8_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 2>, uint8_t, SIMDVec_u<uint16_t, 2>>(*this); } template<> inline SIMDVec_u<uint16_t, 4>::operator SIMDVec_u<uint8_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 4>, uint8_t, SIMDVec_u<uint16_t, 4>>(*this); } template<> inline SIMDVec_u<uint16_t, 8>::operator SIMDVec_u<uint8_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 8>, uint8_t, SIMDVec_u<uint16_t, 8>>(*this); } template<> inline SIMDVec_u<uint16_t, 16>::operator SIMDVec_u<uint8_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 16>, uint8_t, SIMDVec_u<uint16_t, 16>>(*this); } template<> inline SIMDVec_u<uint16_t, 32>::operator SIMDVec_u<uint8_t, 32>() 
const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 32>, uint8_t, SIMDVec_u<uint16_t, 32>>(*this); } template<> inline SIMDVec_u<uint16_t, 64>::operator SIMDVec_u<uint8_t, 64>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint8_t, 64>, uint8_t, SIMDVec_u<uint16_t, 64>>(*this); } template<> inline SIMDVec_u<uint32_t, 1>::operator SIMDVec_u<uint16_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 1>, uint16_t, SIMDVec_u<uint32_t, 1>>(*this); } template<> inline SIMDVec_u<uint32_t, 2>::operator SIMDVec_u<uint16_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 2>, uint16_t, SIMDVec_u<uint32_t, 2>>(*this); } template<> inline SIMDVec_u<uint32_t, 4>::operator SIMDVec_u<uint16_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 4>, uint16_t, SIMDVec_u<uint32_t, 4>>(*this); } template<> inline SIMDVec_u<uint32_t, 8>::operator SIMDVec_u<uint16_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 8>, uint16_t, SIMDVec_u<uint32_t, 8>>(*this); } template<> inline SIMDVec_u<uint32_t, 16>::operator SIMDVec_u<uint16_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 16>, uint16_t, SIMDVec_u<uint32_t, 16>>(*this); } template<> inline SIMDVec_u<uint32_t, 32>::operator SIMDVec_u<uint16_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint16_t, 32>, uint16_t, SIMDVec_u<uint32_t, 32>>(*this); } template<> inline SIMDVec_u<uint64_t, 1>::operator SIMDVec_u<uint32_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 1>, uint32_t, SIMDVec_u<uint64_t, 1>>(*this); } template<> inline SIMDVec_u<uint64_t, 2>::operator SIMDVec_u<uint32_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 2>, uint32_t, SIMDVec_u<uint64_t, 2>>(*this); } template<> inline SIMDVec_u<uint64_t, 4>::operator SIMDVec_u<uint32_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 4>, uint32_t, SIMDVec_u<uint64_t, 4>>(*this); } template<> inline SIMDVec_u<uint64_t, 8>::operator 
SIMDVec_u<uint32_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 8>, uint32_t, SIMDVec_u<uint64_t, 8>>(*this); } template<> inline SIMDVec_u<uint64_t, 16>::operator SIMDVec_u<uint32_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_u<uint32_t, 16>, uint32_t, SIMDVec_u<uint64_t, 16>>(*this); } template<> inline SIMDVec_i<int16_t, 1>::operator SIMDVec_i<int8_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 1>, int8_t, SIMDVec_i<int16_t, 1>>(*this); } template<> inline SIMDVec_i<int16_t, 2>::operator SIMDVec_i<int8_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 2>, int8_t, SIMDVec_i<int16_t, 2>>(*this); } template<> inline SIMDVec_i<int16_t, 4>::operator SIMDVec_i<int8_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 4>, int8_t, SIMDVec_i<int16_t, 4>>(*this); } template<> inline SIMDVec_i<int16_t, 8>::operator SIMDVec_i<int8_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 8>, int8_t, SIMDVec_i<int16_t, 8>>(*this); } template<> inline SIMDVec_i<int16_t, 16>::operator SIMDVec_i<int8_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 16>, int8_t, SIMDVec_i<int16_t, 16>>(*this); } template<> inline SIMDVec_i<int16_t, 32>::operator SIMDVec_i<int8_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 32>, int8_t, SIMDVec_i<int16_t, 32>>(*this); } template<> inline SIMDVec_i<int16_t, 64>::operator SIMDVec_i<int8_t, 64>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int8_t, 64>, int8_t, SIMDVec_i<int16_t, 64>>(*this); } template<> inline SIMDVec_i<int32_t, 1>::operator SIMDVec_i<int16_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 1>, int16_t, SIMDVec_i<int32_t, 1>>(*this); } template<> inline SIMDVec_i<int32_t, 2>::operator SIMDVec_i<int16_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 2>, int16_t, SIMDVec_i<int32_t, 2>>(*this); } template<> inline SIMDVec_i<int32_t, 4>::operator SIMDVec_i<int16_t, 4>() const { 
return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 4>, int16_t, SIMDVec_i<int32_t, 4>>(*this); } template<> inline SIMDVec_i<int32_t, 8>::operator SIMDVec_i<int16_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 8>, int16_t, SIMDVec_i<int32_t, 8>>(*this); } template<> inline SIMDVec_i<int32_t, 16>::operator SIMDVec_i<int16_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 16>, int16_t, SIMDVec_i<int32_t, 16>>(*this); } template<> inline SIMDVec_i<int32_t, 32>::operator SIMDVec_i<int16_t, 32>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int16_t, 32>, int16_t, SIMDVec_i<int32_t, 32>>(*this); } template<> inline SIMDVec_i<int64_t, 1>::operator SIMDVec_i<int32_t, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 1>, int32_t, SIMDVec_i<int64_t, 1>>(*this); } template<> inline SIMDVec_i<int64_t, 2>::operator SIMDVec_i<int32_t, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 2>, int32_t, SIMDVec_i<int64_t, 2>>(*this); } template<> inline SIMDVec_i<int64_t, 4>::operator SIMDVec_i<int32_t, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 4>, int32_t, SIMDVec_i<int64_t, 4>>(*this); } template<> inline SIMDVec_i<int64_t, 8>::operator SIMDVec_i<int32_t, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 8>, int32_t, SIMDVec_i<int64_t, 8>>(*this); } template<> inline SIMDVec_i<int64_t, 16>::operator SIMDVec_i<int32_t, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_i<int32_t, 16>, int32_t, SIMDVec_i<int64_t, 16>>(*this); } template<> inline SIMDVec_f<double, 1>::operator SIMDVec_f<float, 1>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 1>, float, SIMDVec_f<double, 1>>(*this); } template<> inline SIMDVec_f<double, 2>::operator SIMDVec_f<float, 2>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 2>, float, SIMDVec_f<double, 2>>(*this); } template<> inline SIMDVec_f<double, 4>::operator SIMDVec_f<float, 4>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 
4>, float, SIMDVec_f<double, 4>>(*this); } template<> inline SIMDVec_f<double, 8>::operator SIMDVec_f<float, 8>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 8>, float, SIMDVec_f<double, 8>>(*this); } template<> inline SIMDVec_f<double, 16>::operator SIMDVec_f<float, 16>() const { return SCALAR_EMULATION::xtoy <SIMDVec_f<float, 16>, float, SIMDVec_f<double, 16>>(*this); } } } #endif
ast-dump-openmp-begin-declare-variant_13.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics int also_before(void) { return 1; } #pragma omp begin declare variant match(user = {condition(1)}) int also_after(void) { return 0; } int also_before(void) { return 0; } #pragma omp end declare variant int also_after(void) { return 2; } int test() { // Should return 0. return also_after() + also_before(); } // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit user={condition(1)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' {{.*}}Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[user={condition(...)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit user={condition(1)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[user={condition(...)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[user={condition(...)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[user={condition(...)}] 'int
({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 2 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit user={condition(1)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'also_after[user={condition(...)}]' 'int ({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:25:1> line:22:5 test 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:12, line:25:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, col:37> // CHECK-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <col:10, col:37> 'int' '+' // CHECK-NEXT: |-PseudoObjectExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | |-CallExpr [[ADDR_27:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_29:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: | `-CallExpr [[ADDR_30:0x[a-z0-9]*]] <line:10:1, line:24:21> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_10]] 'also_after[user={condition(...)}]' 'int ({{.*}})' // CHECK-NEXT: `-PseudoObjectExpr
[[ADDR_32:0x[a-z0-9]*]] <line:24:25, col:37> 'int' // CHECK-NEXT: |-CallExpr [[ADDR_33:0x[a-z0-9]*]] <col:25, col:37> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_36:0x[a-z0-9]*]] <line:13:1, line:24:37> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' {{.*}}Function [[ADDR_6]] 'also_before[user={condition(...)}]' 'int ({{.*}})'
// NOTE(review): clang lit test for `#pragma omp begin/end declare variant`; both base
// functions resolve to their user={condition(1)} variants, so test() returns 0+0. The
// CHECK lines pin absolute line:col AST locations -- do not insert or remove lines above
// the code without regenerating the expected dump.
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/resize.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
% NOTE(review): the implementation below maps each EXIF-style orientation value to the
% flip/flop/rotate/transpose/transverse operation that restores a top-left-oriented
% image, then stamps the result TopLeftOrientation so the correction is not re-applied.
% Undefined/TopLeft (and unknown values, via default:) fall through to a plain clone.
% Any of the transform calls may return NULL on error, which is propagated unchanged.
% */ MagickExport Image *AutoOrientImage(const Image *image, const OrientationType orientation,ExceptionInfo *exception) { Image *orient_image; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); orient_image=(Image *) NULL; switch(orientation) { case UndefinedOrientation: case TopLeftOrientation: default: { orient_image=CloneImage(image,0,0,MagickTrue,exception); break; } case TopRightOrientation: { orient_image=FlopImage(image,exception); break; } case BottomRightOrientation: { orient_image=RotateImage(image,180.0,exception); break; } case BottomLeftOrientation: { orient_image=FlipImage(image,exception); break; } case LeftTopOrientation: { orient_image=TransposeImage(image,exception); break; } case RightTopOrientation: { orient_image=RotateImage(image,90.0,exception); break; } case RightBottomOrientation: { orient_image=TransverseImage(image,exception); break; } case LeftBottomOrientation: { orient_image=RotateImage(image,270.0,exception); break; } } if (orient_image != (Image *) NULL) orient_image->orientation=TopLeftOrientation; return(orient_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChopImage() removes a region of an image and collapses the image to occupy % the removed portion. % % The format of the ChopImage method is: % % Image *ChopImage(const Image *image,const RectangleInfo *chop_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o chop_info: Define the region of the image to chop. % % o exception: return any errors or warnings in this structure.
% NOTE(review): ChopImage clones the image at the reduced size, then copies the rows
% above the chop rectangle (first OpenMP loop) and the rows below it (second loop),
% skipping the chopped columns within each row; colormap indexes travel with the pixels.
% progress++ and the monitor callback are guarded together by the named critical section.
% NOTE(review): indexes=GetCacheViewAuthenticIndexQueue(image_view) reads an index queue
% from a *virtual* (read-only) view; GetCacheViewVirtualIndexQueue would match the access
% pattern here -- confirm against upstream before changing.
% */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); assert(chop_info != (RectangleInfo *) NULL); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image.
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,chop_image,1,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict chop_indexes, *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { *q=(*p); if (indexes != (IndexPacket *) NULL) { if (chop_indexes != (IndexPacket *) NULL) *chop_indexes++=GetPixelIndex(indexes+x); } q++; } p++; } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract the rows below the chop rectangle.
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict chop_indexes, *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { *q=(*p); if (indexes != (IndexPacket *) NULL) { if (chop_indexes != (IndexPacket *) NULL) *chop_indexes++=GetPixelIndex(indexes+x); } q++; } p++; } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C M Y K I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a % single
% NOTE(review): ConsolidateCMYKImages() consumes the image list four frames at a time,
% mapping each grayscale plane's inverted intensity (QuantumRange - intensity) into the
% C, M, Y and K channels of one CMYK frame. The first plane uses
% QueueCacheViewAuthenticPixels (fresh, uninitialized rows); the later planes use
% GetCacheViewAuthenticPixels so the already-written channels are preserved while one
% channel is updated. A short (non-multiple-of-4) tail list terminates the loop early.
image. % % The format of the ConsolidateCMYKImages method is: % % Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; register ssize_t i; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); cmyk_images=NewImageList(); for (i=0; i < (ssize_t) GetImageListLength(images); i+=4) { cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace); image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p))); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if
(images == (Image *) NULL) break; image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)); p++; q++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register
ssize_t x; register PixelPacket *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; indexes=GetCacheViewAuthenticIndexQueue(cmyk_view); for (x=0; x < (ssize_t) images->columns; x++) { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange- GetPixelIntensity(images,p))); p++; } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); AppendImageToList(&cmyk_images,cmyk_image); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry.
NOTE(review): the crop request is validated against the virtual canvas (the image's
page); a crop entirely outside the canvas returns a 1x1 transparent placeholder rather
than failing, otherwise the rectangle is clamped to the canvas and the image, the page
offsets are adjusted, and rows (plus colormap indexes, when both present) are copied in
bulk with CopyMagickMemory under an OpenMP parallel-for.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.opacity=(Quantum) TransparentOpacity; (void) SetImageBackgroundColor(crop_image); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 0) && (page.width > geometry->width))
page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. */ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) || ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image.
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); crop_view=AcquireAuthenticCacheView(crop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,crop_image,1,1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict crop_indexes; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view); (void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (crop_indexes != (IndexPacket *) NULL)) (void) CopyMagickMemory(crop_indexes,indexes,(size_t) crop_image->columns* sizeof(*crop_indexes)); if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CropImage) #endif proceed=SetImageProgress(image,CropImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % %
% NOTE(review): CropImageToTiles parses the geometry string itself (via
% ParseGravityGeometry) and returns either a list of tiles (the @-flag NxM tiling or
% fixed WxH tiling), a single +X+Y crop, or a plain clone when the image already fits.
% MagickRound is a local round-half-up helper used so tile edges are computed
% consistently in both the x and y directions.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImageToTiles() crops a single image, into a possible list of tiles. % This may include a single sub-region of the image. This basically applies % all the normal geometry flags for Crop. % % Image *CropImageToTiles(const Image *image, % const char *crop_geometry, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to crop; the resulting tile list is returned. % % o crop_geometry: A crop geometry string. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport Image *CropImageToTiles(const Image *image, const char *crop_geometry,ExceptionInfo *exception) { Image *next, *crop_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); crop_image=NewImageList(); next=NewImageList(); flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception); if ((flags & AreaValue) != 0) { PointInfo delta, offset; RectangleInfo crop; size_t height, width; /* Crop into NxM tiles (@ flag). */ width=image->columns; height=image->rows; if (geometry.width == 0) geometry.width=1; if (geometry.height == 0) geometry.height=1; if ((flags & AspectValue) == 0) { width-=(geometry.x < 0 ? -1 : 1)*geometry.x; height-=(geometry.y < 0 ? -1 : 1)*geometry.y; } else { width+=(geometry.x < 0 ? -1 : 1)*geometry.x; height+=(geometry.y < 0 ?
-1 : 1)*geometry.y; } delta.x=(double) width/geometry.width; delta.y=(double) height/geometry.height; if (delta.x < 1.0) delta.x=1.0; if (delta.y < 1.0) delta.y=1.0; for (offset.y=0; offset.y < (double) height; ) { if ((flags & AspectValue) == 0) { crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y- (geometry.y > 0 ? 0 : geometry.y))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) MagickRound((MagickRealType) (offset.y+ (geometry.y < 0 ? 0 : geometry.y))); } else { crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y- (geometry.y > 0 ? geometry.y : 0))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) MagickRound((MagickRealType) (offset.y+ (geometry.y < 0 ? geometry.y : 0))); } crop.height-=crop.y; crop.y+=image->page.y; for (offset.x=0; offset.x < (double) width; ) { if ((flags & AspectValue) == 0) { crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x- (geometry.x > 0 ? 0 : geometry.x))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) MagickRound((MagickRealType) (offset.x+ (geometry.x < 0 ? 0 : geometry.x))); } else { crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x- (geometry.x > 0 ? geometry.x : 0))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) MagickRound((MagickRealType) (offset.x+ (geometry.x < 0 ? geometry.x : 0))); } crop.width-=crop.x; crop.x+=image->page.x; next=CropImage(image,&crop,exception); if (next != (Image *) NULL) AppendImageToList(&crop_image,next); } } ClearMagickException(exception); return(crop_image); } if (((geometry.width == 0) && (geometry.height == 0)) || ((flags & XValue) != 0) || ((flags & YValue) != 0)) { /* Crop a single region at +X+Y.
*/ crop_image=CropImage(image,&geometry,exception); if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0)) { crop_image->page.width=geometry.width; crop_image->page.height=geometry.height; crop_image->page.x-=geometry.x; crop_image->page.y-=geometry.y; } return(crop_image); } if ((image->columns > geometry.width) || (image->rows > geometry.height)) { RectangleInfo page; size_t height, width; ssize_t x, y; /* Crop into tiles of fixed size WxH. */ page=image->page; if (page.width == 0) page.width=image->columns; if (page.height == 0) page.height=image->rows; width=geometry.width; if (width == 0) width=page.width; height=geometry.height; if (height == 0) height=page.height; next=NewImageList(); for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height) { for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width) { geometry.width=width; geometry.height=height; geometry.x=x; geometry.y=y; next=CropImage(image,&geometry,exception); if (next == (Image *) NULL) break; AppendImageToList(&crop_image,next); } if (next == (Image *) NULL) break; } return(crop_image); } return(CloneImage(image,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x c e r p t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExcerptImage() returns an excerpt of the image as defined by the geometry. % % The format of the ExcerptImage method is: % % Image *ExcerptImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure.
% NOTE(review): ExcerptImage is a raw sub-rectangle copy -- unlike CropImage it does no
% virtual-canvas (page) handling. Each row is bulk-copied with CopyMagickMemory from the
% virtual view at (geometry->x, geometry->y + row); colormap indexes are copied when
% both source and destination expose an index queue. Rows are processed under an OpenMP
% parallel-for with progress/status shared across threads.
% */ MagickExport Image *ExcerptImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define ExcerptImageTag "Excerpt/Image" CacheView *excerpt_view, *image_view; Image *excerpt_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate excerpt image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (excerpt_image == (Image *) NULL) return((Image *) NULL); /* Excerpt each row. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,excerpt_image,excerpt_image->rows,1) #endif for (y=0; y < (ssize_t) excerpt_image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict excerpt_indexes, *magick_restrict indexes; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } (void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q)); indexes=GetCacheViewAuthenticIndexQueue(image_view); if (indexes != (IndexPacket *) NULL) { excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view); if (excerpt_indexes != (IndexPacket *) NULL) (void)
CopyMagickMemory(excerpt_indexes,indexes,(size_t) excerpt_image->columns*sizeof(*excerpt_indexes)); } if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ExcerptImage) #endif proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } excerpt_view=DestroyCacheView(excerpt_view); image_view=DestroyCacheView(image_view); excerpt_image->type=image->type; if (status == MagickFalse) excerpt_image=DestroyImage(excerpt_image); return(excerpt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtentImage() extends the image as defined by the geometry, gravity, and % image background color. Set the (x,y) offset of the geometry to move the % original image relative to the extended image. % % The format of the ExtentImage method is: % % Image *ExtentImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExtentImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { Image *extent_image; /* Allocate extent image.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((image->columns == geometry->width) && (image->rows == geometry->height) && (geometry->x == 0) && (geometry->y == 0)) return(CloneImage(image,0,0,MagickTrue,exception)); extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (extent_image == (Image *) NULL) return((Image *) NULL); (void) SetImageBackgroundColor(extent_image); (void) CompositeImage(extent_image,image->compose,image,-geometry->x, -geometry->y); return(extent_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlipImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis. % % The format of the FlipImage method is: % % Image *FlipImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is copied whole into destination row
    (rows-y-1), mirroring around the central x-axis.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flip_image,1,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flip_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* Mirror the page (canvas) y-offset so the virtual canvas flips too. */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: pixels within each row are reversed in place, mirroring
    around the central y-axis.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flop_image,1,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    /* NOTE(review): the p cast below lacks the const qualifier used by the
       sibling functions -- harmless, but inconsistent. */
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Write destination right-to-left while reading source left-to-right. */
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* Mirror the page (canvas) x-offset so the virtual canvas flops too. */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Helper for RollImage(): copy a columns-x-rows region of pixels (and any
  colormap indexes) from source position (sx,sy) to destination position
  (dx,dy).  Returns MagickFalse if any scanline transfer fails.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* A zero-width region is a no-op (a zero-height region simply skips the
     loop below). */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict destination_indexes;

    register PixelPacket
      *magick_restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p));
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /* Normalize the offsets into [0, columns) and [0, rows). */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the roll wraps the image around, so the result is assembled
    from the four quadrants produced by splitting at (offset.x, offset.y).
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *ShaveImage(const Image *image, const RectangleInfo *shave_info,ExceptionInfo *exception) { Image *shave_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((2*shave_info->width) >= image->columns) || ((2*shave_info->height) >= image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); SetGeometry(image,&geometry); geometry.width-=2*shave_info->width; geometry.height-=2*shave_info->height; geometry.x=(ssize_t) shave_info->width+image->page.x; geometry.y=(ssize_t) shave_info->height+image->page.y; shave_image=CropImage(image,&geometry,exception); if (shave_image == (Image *) NULL) return((Image *) NULL); shave_image->page.width-=2*shave_info->width; shave_image->page.height-=2*shave_info->height; shave_image->page.x-=(ssize_t) shave_info->width; shave_image->page.y-=(ssize_t) shave_info->height; return(shave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImage() splices a solid color into the image as defined by the % geometry. % % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse) { InheritException(exception,&splice_image->exception); splice_image=DestroyImage(splice_image); return((Image *) NULL); } (void) SetImageBackgroundColor(splice_image); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case StaticGravity: case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes, *magick_restrict splice_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { 
MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes, *magick_restrict splice_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); 
indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % */ /* DANGER: This function destroys what it assumes to be a single image list. If the input image is part of a larger list, all other images in that list will be simply 'lost', not destroyed. Also if the crop generates a list of images only the first image is resized. 
   And finally if the crop succeeds and the resize failed, you will get a
   cropped image, as well as a 'false' or 'failed' report.

   This function should probably be deprecated in favor of direct calls to
   CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *resize_image,
    *transform_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.  On crop failure the original
        is cloned so *image still holds a usable handle.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
    &(*image)->exception);
  (void) flags;
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);  /* already the requested size */
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,transform_image->blur,&(*image)->exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImages() calls TransformImage() on each image of a sequence.
%
%  The format of the TransformImage method is:
%
%      MagickBooleanType TransformImages(Image **image,
%        const char *crop_geometry,const char *image_geometry)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.  This geometry defines a
%      subregion of the image to crop.
%
%    o image_geometry: An image geometry string.  This geometry defines the
%      final size of the image.
% */ MagickExport MagickBooleanType TransformImages(Image **images, const char *crop_geometry,const char *image_geometry) { Image *image, **image_list, *transform_images; MagickStatusType status; register ssize_t i; assert(images != (Image **) NULL); assert((*images)->signature == MagickSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); image_list=ImageListToArray(*images,&(*images)->exception); if (image_list == (Image **) NULL) return(MagickFalse); status=MagickTrue; transform_images=NewImageList(); for (i=0; image_list[i] != (Image *) NULL; i++) { image=image_list[i]; status&=TransformImage(&image,crop_geometry,image_geometry); AppendImageToList(&transform_images,image); } *images=transform_images; image_list=(Image **) RelinquishMagickMemory(image_list); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p o s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransposeImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis while rotating them by 90 degrees. % % The format of the TransposeImage method is: % % Image *TransposeImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The result swaps dimensions: rows x columns. */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image: each source row is written as a destination column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict transpose_indexes,
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    /* Source row (rows-y-1) becomes destination column (rows-y-1). */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    /* NOTE(review): authentic index queue requested from a virtual view --
       confirm GetCacheViewVirtualIndexQueue() was not intended here. */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
        if (transpose_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(transpose_indexes,indexes,(size_t)
            image->columns*sizeof(*transpose_indexes));
      }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransposeImage)
#endif
        proceed=SetImageProgress(image,TransposeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /* Swap the page (canvas) geometry to match the rotated axes. */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* The result swaps dimensions: rows x columns. */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image: each source row is reversed and written as a
    destination column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict transverse_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
      1),0,1,transverse_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Write the destination column bottom-up while reading the source row
       left-to-right, reversing the row. */
    q+=image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
      *--q=(*p++);
    /* NOTE(review): authentic index queue requested from a virtual view --
       confirm GetCacheViewVirtualIndexQueue() was not intended here. */
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
        if (transverse_indexes != (IndexPacket *) NULL)
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(transverse_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
      }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /* Swap the page geometry for the rotation, then mirror the offsets. */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception) { RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); geometry=GetImageBoundingBox(image,exception); if ((geometry.width == 0) || (geometry.height == 0)) { Image *crop_image; crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.opacity=(Quantum) TransparentOpacity; (void) SetImageBackgroundColor(crop_image); crop_image->page=image->page; crop_image->page.x=(-1); crop_image->page.y=(-1); return(crop_image); } geometry.x+=image->page.x; geometry.y+=image->page.y; return(CropImage(image,&geometry,exception)); }
sageInterface_modified.h
#ifndef ROSE_SAGE_INTERFACE #define ROSE_SAGE_INTERFACE #include "sage3basic.hhh" #include <stdint.h> #include <utility> #include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT #if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project ); #else SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project ); #endif #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT #include "rewrite.h" #endif // DQ (7/20/2008): Added support for unparsing abitrary strings in the unparser. #include "astUnparseAttribute.h" #include <set> #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT #include "LivenessAnalysis.h" #include "abstract_handle.h" #include "ClassHierarchyGraph.h" #endif // DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h //! A global function for getting the string associated with an enum (which is defined in global scope) ROSE_DLL_API std::string getVariantName (VariantT v); // DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE // This namespace is specific to interface functions that operate on the Sage III AST. // The name was chosen so as not to conflict with other classes within ROSE. // This will become the future home of many interface functions which operate on // the AST and which are generally useful to users. As a namespace multiple files can be used // to represent the compete interface and different developers may contribute interface // functions easily. // Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008) // We could add simpler layers of support for construction of IR nodes by // hiding many details in "makeSg***()" functions. 
Such functions would // return pointers to the associated Sg*** objects and would be able to hide // many IR specific details, including: // memory handling // optional parameter settings not often required // use of Sg_File_Info objects (and setting them as transformations) // // namespace AST_Interface (this name is taken already by some of Qing's work :-) //! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode() #define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode() //------------------------------------------------------------------------ /*! \brief This namespace is to organize functions that are useful when operating on the AST. \defgroup frontendSageUtilityFunctions SAGE III utility functions(SageInterface) \ingroup ROSE_FrontEndGroup The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate higher level interfaces which work with the IR. The namespace, SageInterface, collects functions that operate on the IR and are supportive of numerous types of routine operations required to support general analysis and transformation of the AST. \internal Further organization of the functions in this namespace is required. 
Major AST manipulation functions are scattered in the following directories - src/midend/astUtil/astInterface - src/roseSupport/utility_function.h, namespace ROSE - src/roseSupport/TransformationSupport.h, class TransformationSupport - src/midend/astInlining/inlinerSupport.C - src/frontend/SageIII/sageInterface - projects: such as outliner, OpenMP_Translator Some other utility functions not related AST can be found in - src/util/stringSupport/string_functions.h, namespace StringUtility - src/roseExtensions/dataStructureTraversal/helpFunctions.C - projects/dataStructureGraphing/helpFunctions.C \todo A number of additional things to do: - Pull scope handling out of EDG/Sage III translation so that is is made available to anyone else building the Sage III IR from scratch (which when it gets non-trivial, involves the manipulation of scopes). - Other stuff ... */ namespace SageInterface { // DQ (4/3/2014): Added general AST support seperate from the AST. // Container and API for analysis information that is outside of the AST and as a result // prevents frequent modification of the IR. class DeclarationSets { // DQ (4/3/2014): This stores all associated declarations as a map of sets. // the key to the map is the first nondefining declaration and the elements of the set are // all of the associated declarations (including the defining declaration). private: //! Map of first-nondefining declaration to all other associated declarations. std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap; public: void addDeclaration(SgDeclarationStatement* decl); const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl); std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap(); bool isLocatedInDefiningScope(SgDeclarationStatement* decl); }; // DQ (4/3/2014): This constucts a data structure that holds analysis information about // the AST that is seperate from the AST. 
This is intended to be a general mechanism // to support analysis information without constantly modifing the IR. DeclarationSets* buildDeclarationSets(SgNode*); //! An internal counter for generating unique SgName ROSE_DLL_API extern int gensym_counter; // tps : 28 Oct 2008 - support for finding the main interpretation SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file); //! Get the unsigned value of a disassembled constant. uint64_t getAsmConstant(SgAsmValueExpression* e); //! Get the signed value of a disassembled constant. int64_t getAsmSignedConstant(SgAsmValueExpression *e); //! Function to add "C" style comment to statement. void addMessageStatement( SgStatement* stmt, std::string message ); //! A persistent attribute to represent a unique name for an expression class UniqueNameAttribute : public AstAttribute { private: std::string name; public: UniqueNameAttribute(std::string n="") {name =n; }; void set_name (std::string n) {name = n;}; std::string get_name () {return name;}; }; // DQ (3/2/2009): Added support for collectiong an merging the referenced symbols in the outlined // function into the list used to edit the outlined code subtree to fixup references (from symbols // in the original file to the symbols in the newer separate file). 
// typedef rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> ReplacementMapType; // void supplementReplacementSymbolMap ( const ReplacementMapTraversal::ReplacementMapType & inputReplacementMap ); // CH (4/9/2010): Use boost::hash instead //#ifdef _MSC_VER #if 0 inline size_t hash_value(SgNode* t) {return (size_t)t;} #endif struct hash_nodeptr { // CH (4/9/2010): Use boost::hash instead //#ifndef _MSC_VER #if 0 //rose_hash::hash<char*> hasher; #endif public: size_t operator()(SgNode* node) const { // CH (4/9/2010): Use boost::hash instead //#ifdef _MSC_VER #if 0 return (size_t) hash_value(node); #else return (size_t) node; #endif } }; #ifndef SWIG // DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time). void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap ); #endif //------------------------------------------------------------------------ //@{ /*! @name Symbol tables \brief utility functions for symbol tables */ // Liao 1/22/2008, used for get symbols for generating variable reference nodes // ! Find a variable symbol in current and ancestor scopes for a given name ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL); // DQ (8/21/2013): Modified to make newest function parameters be default arguments. // DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments. //! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL. 
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL); // SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList); ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); // DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing. //!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL); // Liao, 1/24/2008, find exact match for a function //!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, const SgType* t, SgScopeStatement *currentScope=NULL); // DQ (8/21/2013): Modified to make newest function parameters be default arguments. // DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments. // DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support). 
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); #if 0 // DQ (8/13/2013): This function does not make since any more, now that we have made the symbol // table handling more precise and we have to provide template parameters for any template lookup. // We also have to know if we want to lookup template classes, template functions, or template // member functions (since each have specific requirements). SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); #endif #if 0 // DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes. // Where these are called we might not know enough information about the template parameters or function // types, for example. SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL); SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL); #endif // DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments. // DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes. 
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName &  name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);

//! Find an enum symbol in current and ancestor scopes for a given name.
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);

//! Find a namespace symbol in current and ancestor scopes for a given name.
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);

// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName &  name, SgScopeStatement *cscope);

/*! \brief set_name of symbol in symbol table.

    This function extracts the symbol from the relevant symbol table,
    changes the name (at the declaration) and reinserts it into the
    symbol table.

    \internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);

/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();

// DQ (6/27/2005):
/*! \brief Output the local symbol tables.

    \implementation Each symbol table is output with the file info where it is
    located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);

//! AST traversal that prints the symbol table of every scope it visits
//! (driver for outputLocalSymbolTables() above).
class OutputLocalSymbolTables : public AstSimpleProcessing
{
  public:
    void visit (SgNode * node);
};

/*! \brief Regenerate the symbol table.

    \implementation current symbol table must be NULL pointer before calling
    this function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);

/*!
\brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted. */ void clearUnusedVariableSymbols (SgNode* root = NULL); // DQ (3/1/2009): //! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table. void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help ); //@} //------------------------------------------------------------------------ //@{ /*! @name Stringify \brief Generate a useful string (name) to describe a SgNode */ /*! \brief Generate a useful name to describe the SgNode \internal default names are used for SgNode objects that can not be associated with a name. */ // DQ (9/21/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgNode * node); /*! \brief Generate a useful name to describe the declaration \internal default names are used for declarations that can not be associated with a name. */ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgStatement * stmt); /*! \brief Generate a useful name to describe the expression \internal default names are used for expressions that can not be associated with a name. */ std::string get_name (const SgExpression * expr); /*! \brief Generate a useful name to describe the declaration \internal default names are used for declarations that can not be associated with a name. */ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgDeclarationStatement * declaration); /*! \brief Generate a useful name to describe the scope \internal default names are used for scope that cannot be associated with a name. 
*/ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgScopeStatement * scope); /*! \brief Generate a useful name to describe the SgSymbol \internal default names are used for SgSymbol objects that cannot be associated with a name. */ // DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support). std::string get_name (const SgSymbol * symbol); /*! \brief Generate a useful name to describe the SgType \internal default names are used for SgType objects that cannot be associated with a name. */ std::string get_name (const SgType * type); /*! \brief Generate a useful name to describe the SgSupport IR node */ std::string get_name (const SgSupport * node); /*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node */ std::string get_name (const SgLocatedNodeSupport * node); /*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node */ std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive ); /*! \brief Generate a useful name to describe the SgToken IR node */ std::string get_name ( const SgToken* token ); //@} //------------------------------------------------------------------------ //@{ /*! @name Class utilities \brief */ /*! \brief Get the default destructor from the class declaration */ // DQ (6/21/2005): Get the default destructor from the class declaration SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration * classDeclaration); /*! \brief Get the default constructor from the class declaration */ // DQ (6/22/2005): Get the default constructor from the class declaration ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration * classDeclaration); /*! \brief Return true if template definition is in the class, false if outside of class. 
*/ // DQ (8/27/2005): bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionDeclaration); /*! \brief Generate a non-defining (forward) declaration from a defining function declaration. \internal should put into sageBuilder ? */ // DQ (9/17/2005): SgTemplateInstantiationMemberFunctionDecl* buildForwardFunctionDeclaration (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation); //! Check if a SgNode is a declaration for a structure bool isStructDeclaration(SgNode * node); //! Check if a SgNode is a declaration for a union bool isUnionDeclaration(SgNode * node); #if 0 // DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration // (so that it can handle template functions and member functions) /*! \brief Return true if member function of a template member function, of false if a non-template member function in a templated class. */ // DQ (8/27/2005): bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl * memberFunctionDeclaration); #endif //@} //------------------------------------------------------------------------ //@{ /*! @name Misc. \brief Not sure the classifications right now */ // DQ (2/12/2012): Added some diagnostic support. //! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened. void whereAmI(SgNode* node); //! Extract a SgPragmaDeclaration's leading keyword . For example "#pragma omp parallel" has a keyword of "omp". std::string extractPragmaKeyword(const SgPragmaDeclaration *); //! Check if a node is SgOmp*Statement ROSE_DLL_API bool isOmpStatement(SgNode* ); /*! \brief Return true if function is overloaded. */ // DQ (8/27/2005): bool isOverloaded (SgFunctionDeclaration * functionDeclaration); // DQ (2/14/2012): Added support function used for variable declarations in conditionals. //! 
Support function used for variable declarations in conditionals void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body); //! Support function used for variable declarations in conditionals void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body); //! Support function used for variable declarations in conditionals void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body); //! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute") void annotateExpressionsWithUniqueNames (SgProject* project); //! Check if a SgNode is a main() function declaration ROSE_DLL_API bool isMain (const SgNode* node); // DQ (6/22/2005): /*! \brief Generate unique name from C and C++ constructs. The name may contain space. This is support for the AST merge, but is generally useful as a more general mechanism than name mangling which is more closely ties to the generation of names to support link-time function name resolution. This is more general than common name mangling in that it resolves more relevant differences between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;"). \implementation current work does not support expressions. */ std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations); /** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter. * @param baseName the word to be included in the variable names. */ std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp"); // DQ (8/10/2010): Added const to first parameter. // DQ (3/10/2007): //! 
Generate a unique string from the source file position information std::string declarationPositionString (const SgDeclarationStatement * declaration); // DQ (1/20/2007): //! Added mechanism to generate project name from list of file names ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false ); //! Given a SgExpression that represents a named function (or bound member //! function), return the mentioned function SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func); //! Get the mask expression from the header of a SgForAllStatement SgExpression* forallMaskExpression(SgForAllStatement* stmt); //! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t); // DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation). /*! \brief Support for faster mangled name generation (caching avoids recomputation). */ #ifndef SWIG // DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time). void clearMangledNameCache (SgGlobal * globalScope); void resetMangledNameCache (SgGlobal * globalScope); #endif std::string getMangledNameFromCache (SgNode * astNode); std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName); SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation); //! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically //! 
Used to have a struct declaration embedded into a variable declaration void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl); // DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the // bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration ); //! Check if a defining declaration comes before of after the non-defining declaration. bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration); // DQ (10/19/2006): Function calls have interesting context dependent rules to determine if // they are output with a global qualifier or not. Were this is true we have to avoid global // qualifiers, since the function's scope has not been defined. This is an example of where // qualification of function names in function calls are context dependent; an interesting // example of where the C++ language is not friendly to source-to-source processing :-). bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall); /*! \brief Compute the intersection set for two ASTs. This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST. */ ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL); //! Deep copy an arbitrary subtree ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree); //! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e); template <typename NodeType> NodeType* deepCopy (const NodeType* subtree) { return dynamic_cast<NodeType*>(deepCopyNode(subtree)); } //! 
Deep copy an expression ROSE_DLL_API SgExpression* copyExpression(SgExpression* e); //!Deep copy a statement ROSE_DLL_API SgStatement* copyStatement(SgStatement* s); // from VarSym.cc in src/midend/astOutlining/src/ASTtools //! Get the variable symbol for the first initialized name of a declaration stmt. ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl); //! Get the first initialized name of a declaration statement ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl); //! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now. ROSE_DLL_API void myRemoveStatement(SgStatement* stmt); ROSE_DLL_API bool isConstantTrue(SgExpression* e); ROSE_DLL_API bool isConstantFalse(SgExpression* e); ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e); ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e); //! Check if a declaration has a "static' modifier bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt); //! Set a declaration as static ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt); //! Check if a declaration has an "extern" modifier ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt); //! Set a declaration as extern ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt); //! Interface for creating a statement whose computation writes its answer into //! a given variable. class StatementGenerator { public: virtual ~StatementGenerator() {}; virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0; }; //! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc) //! //! Return the left hand, right hand expressions and if the left hand variable is also being read bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL); //! 
Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. This function will convert them all to a top level SgInitializedName. ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current); //! Build an abstract handle from an AST node, reuse previously built handle when possible ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*); //! Obtain a matching SgNode from an abstract handle string ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string); //! Dump information about a SgNode for debugging ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc=""); //! Reorder a list of declaration statements based on their appearance order in source files ROSE_DLL_API std::vector<SgDeclarationStatement*> sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec); // DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names. //! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc. // bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp ); bool isPrefixOperator( SgExpression* exp ); //! Check for proper names of possible prefix operators (used in isPrefixOperator()). bool isPrefixOperatorName( const SgName & functionName ); //! Is an overloaded operator a postfix operator. (e.g. ). bool isPostfixOperator( SgExpression* exp ); //! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()). bool isIndexOperator( SgExpression* exp ); //@} //------------------------------------------------------------------------ //@{ /*! @name AST properties \brief version, language properties of current AST. 
*/ // std::string version(); // utility_functions.h, version number /*! Brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use! */ ROSE_DLL_API bool is_C_language (); ROSE_DLL_API bool is_OpenMP_language (); ROSE_DLL_API bool is_UPC_language (); //! Check if dynamic threads compilation is used for UPC programs ROSE_DLL_API bool is_UPC_dynamic_threads(); ROSE_DLL_API bool is_C99_language (); ROSE_DLL_API bool is_Cxx_language (); ROSE_DLL_API bool is_Java_language (); ROSE_DLL_API bool is_Fortran_language (); ROSE_DLL_API bool is_CAF_language (); ROSE_DLL_API bool is_PHP_language(); ROSE_DLL_API bool is_Python_language(); ROSE_DLL_API bool is_Cuda_language(); ROSE_DLL_API bool is_X10_language(); ROSE_DLL_API bool is_binary_executable(); ROSE_DLL_API bool is_mixed_C_and_Cxx_language (); ROSE_DLL_API bool is_mixed_Fortran_and_C_language (); ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language (); ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language (); //@} //------------------------------------------------------------------------ //@{ /*! @name Scope \brief */ // DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique // labels for scopes in a function (as required for name mangling). /*! \brief Assigns unique numbers to each SgScopeStatement of a function. This is used to provide unique names for variables and types defined is different nested scopes of a function (used in mangled name generation). */ void resetScopeNumbers (SgFunctionDefinition * functionDeclaration); // DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique // labels for scopes in a function (as required for name mangling). /*! \brief Clears the cache of scope,integer pairs for the input function. This is used to clear the cache of computed unique labels for scopes in a function. 
This function should be called after any transformation on a function that might effect the allocation of scopes and cause the existing unique numbers to be incorrect. This is part of support to provide unique names for variables and types defined is different nested scopes of a function (used in mangled name generation). */ void clearScopeNumbers (SgFunctionDefinition * functionDefinition); //!Find the enclosing namespace of a declaration SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration); // SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node); bool isPrototypeInScope (SgScopeStatement * scope, SgFunctionDeclaration * functionDeclaration, SgDeclarationStatement * startingAtDeclaration); //!check if node1 is a strict ancestor of node 2. (a node is not considered its own ancestor) bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2); //@} //------------------------------------------------------------------------ //@{ /*! @name Preprocessing Information \brief #if-#else-#end, comments, #include, etc */ //! Dumps a located node's preprocessing information. void dumpPreprocInfo (SgLocatedNode* locatedNode); //! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX. ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL); //! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon. ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false); //! 
Move preprocessing information of stmt_src to stmt_dst, Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePreprend is set to true. Optionally, the relative position can be adjust after the moving using dst_position. ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false); //!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation. ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf); //!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo() ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf); //! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes. ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target, const std::string & text, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before); //!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on. 
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);

//@}

//------------------------------------------------------------------------
//@{
/*! @name Source File Position
  \brief set Sg_File_Info for a SgNode
*/

//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
               PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
               PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);

// DQ (11/25/2009): Added matching support for adding comments to SgAsm nodes.
// Build and attach comment
// void attachComment(SgAsmStatement* target, const std::string & content );

// DQ (7/20/2008): I am not clear where I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );

// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************

// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and uses the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code position for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);

// A better name might be "setSourcePositionForSubTree"
//! Set the source code position for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root); // DQ (5/1/2012): New function with improved name (still preserving the previous interface). // This function is not required once the new mechanism defining a source position mode is complete (shortly). //! Set subtree as a transformation. // void setSourcePositionAtRootAndAllChildrenAsTransformation(SgNode *root); // void setSourcePositionAtRootAndAllChildrenAsDefault(SgNode *root); // Removed to force use of the API and permit flexability in the lower level implementation. //! DQ (5/1/2012): New function with improved name. // void setSourcePositionToDefault( SgLocatedNode* locatedNode ); template<class T> void setSourcePositionToDefault( T* node ); //! DQ (5/1/2012): New function with improved name. void setSourcePositionAsTransformation(SgNode *node); // DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatability). void setSourcePositionPointersToNull(SgNode *node); // ************************************************************************ // ************************************************************************ // Older deprecated functions // ************************************************************************ // Liao, 1/8/2007, set file info. for a whole subtree as transformation generated //! Set current node's source position as transformation generated ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node); //! Set current node's source position as NULL ROSE_DLL_API void setOneSourcePositionNull(SgNode *node); //! Recursively set source position info(Sg_File_Info) as transformation generated ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root); //! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool ROSE_DLL_API void setSourcePositionForTransformation_memoryPool(); //! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). 
These nodes WILL be unparsed. Not for transformation usage. // ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode); // ************************************************************************ //@} //------------------------------------------------------------------------ //@{ /*! @name Data types \brief */ // from src/midend/astInlining/typeTraits.h // src/midend/astUtil/astInterface/AstInterface.h //! Get the right bool type according to C or C++ language input SgType* getBoolType(SgNode* n); //! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long. ////! ////! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types ROSE_DLL_API bool isStrictIntegerType(SgType* t); //!Get the data type of the first initialized name of a declaration statement ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl); //! Is a type default constructible? This may not quite work properly. ROSE_DLL_API bool isDefaultConstructible(SgType* type); //! Is a type copy constructible? This may not quite work properly. ROSE_DLL_API bool isCopyConstructible(SgType* type); //! Is a type assignable? This may not quite work properly. ROSE_DLL_API bool isAssignable(SgType* type); #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT //! Check if a class type is a pure virtual class. True means that there is at least //! one pure virtual function that has not been overridden. //! In the case of an incomplete class type (forward declaration), this function returns false. ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy); #endif //! Does a type have a trivial (built-in) destructor? ROSE_DLL_API bool hasTrivialDestructor(SgType* t); //! Is this type a non-constant reference type? (Handles typedefs correctly) ROSE_DLL_API bool isNonconstReference(SgType* t); //! Is this type a const or non-const reference type? 
(Handles typedefs correctly) ROSE_DLL_API bool isReferenceType(SgType* t); //! Is this type a pointer type? (Handles typedefs correctly) ROSE_DLL_API bool isPointerType(SgType* t); //! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to //! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile, //! it returns false for (int const * x) and (int const * const x) because these types point to a const int. //! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns //! false for const (int * const * x) ROSE_DLL_API bool isPointerToNonConstType(SgType* type); //! Is this a const type? /* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char. * Similarly, neither for const int b[10]; or const int & c =10; * The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type". */ ROSE_DLL_API bool isConstType(SgType* t); //! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers. SgType* removeConst(SgType* t); //! Is this a volatile type? ROSE_DLL_API bool isVolatileType(SgType* t); //! Is this a restrict type? ROSE_DLL_API bool isRestrictType(SgType* t); //! Is this a scalar type? /*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary */ ROSE_DLL_API bool isScalarType(SgType* t); //! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long. //! //! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool. ROSE_DLL_API bool isStrictIntegerType(SgType* t); //! 
Check if a type is a struct type (a special SgClassType in ROSE) ROSE_DLL_API bool isStructType(SgType* t); //! Generate a mangled string for a given type based on Itanium C++ ABI ROSE_DLL_API std::string mangleType(SgType* type); //! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE ROSE_DLL_API std::string mangleScalarType(SgType* type); //! Generated mangled modifier types, include const, volatile,according to Itanium C++ ABI, with extension to handle UPC shared types. ROSE_DLL_API std::string mangleModifierType(SgModifierType* type); //! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array. ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t); //! Get the number of dimensions of an array type ROSE_DLL_API int getDimensionCount(SgType* t); //! Get the element type of an array ROSE_DLL_API SgType* getArrayElementType(SgType* t); //! Get the element type of an array, pointer or string, or NULL if not applicable ROSE_DLL_API SgType* getElementType(SgType* t); /// \brief returns the array dimensions in an array as defined for arrtype /// \param arrtype the type of a C/C++ array /// \return an array that contains an expression indicating each dimension's size. /// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which /// becomes responsible for freeing the expressions). /// Note, the first entry of the array is a SgNullExpression, iff the /// first array dimension was not specified. 
/// \code /// int x[] = { 1, 2, 3 }; /// \endcode /// note, the expression does not have to be a constant /// \code /// int x[i*5]; /// \endcode /// \post return-value.empty() == false /// \post return-value[*] != NULL (no nullptr in the returned vector) std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype); /// \brief returns the array dimensions in an array as defined for arrtype /// \param arrtype the type of a C/C++ array /// \param varref a reference to an array variable (the variable of type arrtype) /// \return an array that contains an expression indicating each dimension's size. /// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which /// becomes responsible for freeing the expressions). /// If the first array dimension was not specified an expression /// that indicates that size is generated. /// \code /// int x[][3] = { 1, 2, 3, 4, 5, 6 }; /// \endcode /// the entry for the first dimension will be: /// \code /// // 3 ... size of 2nd dimension /// sizeof(x) / (sizeof(int) * 3) /// \endcode /// \pre arrtype is the array-type of varref /// \post return-value.empty() == false /// \post return-value[*] != NULL (no nullptr in the returned vector) /// \post !isSgNullExpression(return-value[*]) std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref); /// \overload /// \note see get_C_array_dimensions for SgVarRefExp for details. /// \todo make initname const std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname); //! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp. ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL); //! 
Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information. /*! * Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense. AST graph for some examples: - shared scalar: SgModifierType -->base type - shared array: SgArrayType --> SgModiferType --> base type - shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt - shared to private: SgModifierType --> SgPointerType --> base type - private to shared: SgPointerType --> SgModifierType --> base type */ ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL ); //! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property. /*! * ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property. */ ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL); //! Check if a modifier type is a UPC shared type. ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type); //! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array. ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type); //! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.) ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type); //! 
Get the block size of a UPC shared modifier type ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type); //! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays) ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t); //! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type. ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t); //! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first. ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t); //! Is a UPC array with dimension of X*THREADS ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t); //! Lookup a named type based on its name, bottomup searching from a specified scope. Note name collison might be allowed for c (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to. ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL); // DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types. //! Get the type of the associated argument expression from the function type. ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression); //@} //------------------------------------------------------------------------ //@{ /*! @name Loop handling \brief */ // by Jeremiah //! Add a step statement to the end of a loop body //! Add a new label to the end of the loop, with the step statement after //! it; then change all continue statements in the old loop body into //! jumps to the label //! //! For example: //! 
while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes //! while (a < 5) {if (a < -3) goto label; label: a++;} ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step); ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f); ROSE_DLL_API void convertForToWhile(SgForStatement* f); ROSE_DLL_API void convertAllForsToWhiles(SgNode* top); //! Change continue statements in a given block of code to gotos to a label ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label); //!Return the loop index variable for a for loop ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop); //!Check if a SgInitializedName is used as a loop index within a AST subtree //! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them. ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root); //! Routines to get and set the body of a loop ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop); ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body); //! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop); //! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop. ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond); //! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested //! //! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. 
IsInclusiveUpperBound is true when <= or >= is used for loop condition ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL); //! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1 ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/); //! Set the lower bound of a loop header for (i=lb; ...) ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb); //! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...) ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub); //! Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc) ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride); //! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop); //! Normalize a for loop, return true if successful. Generated constants will be fold by default. //! //! Translations are : //! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..) //! For test expression: //! i<x is normalized to i<= (x-1) and //! i>x is normalized to i>= (x+1) //! For increment expression: //! i++ is normalized to i+=1 and //! i-- is normalized to i+=-1 //! 
i-=s is normalized to i+= -s ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true); //!Normalize a Fortran Do loop. Make the default increment expression (1) explicit ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop); //! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor. ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor); //! Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!). ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder); //! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize); //Winnie Loop Collapsing SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor); //@} //------------------------------------------------------------------------ //@{ /*! @name Topdown search \brief Top-down traversal from current node to find a node of a specified type */ //! Query a subtree to get all nodes of a given type, with an appropriate downcast. template <typename NodeType> std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant) { Rose_STL_Container<SgNode*> nodes = NodeQuery::querySubTree(top,variant); std::vector<NodeType*> result(nodes.size(), NULL); int count = 0; for (Rose_STL_Container<SgNode*>::const_iterator i = nodes.begin(); i != nodes.end(); ++i, ++count) { NodeType* node = dynamic_cast<NodeType*>(*i); ROSE_ASSERT (node); result[count] = node; } return result; } /*! \brief Returns STL vector of SgFile IR node pointers. Demonstrates use of restricted traversal over just SgFile IR nodes. 
*/ std::vector < SgFile * >generateFileList (); /** Get the current SgProject IR Node. * * The library should never have more than one project and it asserts such. If no project has been created yet then this * function returns the null pointer. */ ROSE_DLL_API SgProject * getProject(); //! Query memory pools to grab SgNode of a specified type template <typename NodeType> static std::vector<NodeType*> getSgNodeListFromMemoryPool() { // This function uses a memory pool traversal specific to the SgFile IR nodes class MyTraversal : public ROSE_VisitTraversal { public: std::vector<NodeType*> resultlist; void visit ( SgNode* node) { NodeType* result = dynamic_cast<NodeType* > (node); ROSE_ASSERT(result!= NULL); if (result!= NULL) { resultlist.push_back(result); } }; virtual ~MyTraversal() {} }; MyTraversal my_traversal; NodeType::visitRepresentativeNode(my_traversal); return my_traversal.resultlist; } /*! \brief top-down traversal from current node to find the main() function declaration */ ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode); //! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another declaration statement SgStatement* findLastDeclarationStatement(SgScopeStatement * scope); //midend/programTransformation/partialRedundancyElimination/pre.h //! Find referenced symbols within an expression std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr); //! Find break statements inside a particular statement, stopping at nested loops or switches /*! loops or switch statements defines their own contexts for break statements. The function will stop immediately if run on a loop or switch statement. If fortranLabel is non-empty, breaks (EXITs) to that label within nested loops are included in the returned list. */ std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = ""); //! 
Find all continue statements inside a particular statement, stopping at nested loops /*! Nested loops define their own contexts for continue statements. The function will stop immediately if run on a loop statement. If fortranLabel is non-empty, continues (CYCLEs) to that label within nested loops are included in the returned list. */ std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = ""); std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l); std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw); //! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag. template <typename T> T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining) { bool found = false; if (!root) return 0; T* decl = dynamic_cast<T*>(root); if (decl!=NULL) { if (scope) { if ((decl->get_scope() == scope)&& (decl->search_for_symbol_from_symbol_table()->get_name()==name)) { found = true; } } else // Liao 2/9/2010. We should allow NULL scope { if(decl->search_for_symbol_from_symbol_table()->get_name()==name) { found = true; } } } if (found) { if (isDefining) { ROSE_ASSERT (decl->get_definingDeclaration() != NULL); return dynamic_cast<T*> (decl->get_definingDeclaration()); } else return decl; } std::vector<SgNode*> children = root->get_traversalSuccessorContainer(); for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i) { T* target= findDeclarationStatement<T> (*i,name, scope, isDefining); if (target) return target; } return 0; } //! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>. 
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);

#if 0 //TODO
  // 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
  // until reach the end node
  SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);

  // 2. return all nodes of type VariantT following the source node
  std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif

//@}

//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
  \brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.

/** Find a node by type using upward traversal.
 *
 *  Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
 *  ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
 *  starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
 *
 *  For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
 *  non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
 *  declaration is different than the first non-defining declaration.
 *
 *  If no ancestor of the requisite type or subtypes is found then this function returns a null pointer.
 *
 *  If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
 *  be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#if 1
  // DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
  // the newer version (below) is what we want to use I will resolve this conflict by keeping
  // the previous version in place.
  if (NULL == astNode)
  {
    return NULL;
  }

  if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
  {
    return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
  }

  // DQ (3/5/2012): Check for reference to self...
  ROSE_ASSERT(astNode->get_parent() != astNode);

  SgNode* parent = astNode->get_parent();

  // DQ (3/5/2012): Check for loops that will cause infinite loops.
  // First pass: plain parent-pointer walk, stopping on a type match, a NULL
  // parent, or a detected cycle (reaching the starting parent again).
  SgNode* previouslySeenParent = parent;
  bool foundCycle = false;
  while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
  {
    ROSE_ASSERT(parent->get_parent() != parent);
#if 0
    printf ("In getEnclosingNode(): parent = %p = %s \n",parent,parent->class_name().c_str());
#endif
    parent = parent->get_parent();

    // DQ (3/5/2012): Check for loops that will cause infinite loops.
    // ROSE_ASSERT(parent != previouslySeenParent);
    if (parent == previouslySeenParent)
    {
      foundCycle = true;
    }
  }
#if 0
  printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // NOTE(review): the result of the first walk above is discarded here and the
  // search restarts from the original parent; the second walk (taken when no
  // cycle was found) redoes the traversal while redirecting declaration
  // statements to their defining declarations. Presumably intentional given
  // the revision history in the comments below — confirm before changing.
  parent = previouslySeenParent;

  SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
  if (declarationStatement != NULL)
  {
#if 0
    printf ("Found a SgDeclarationStatement \n");
#endif
    SgDeclarationStatement* definingDeclaration         = declarationStatement->get_definingDeclaration();
    SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
    printf (" --- declarationStatement = %p \n",declarationStatement);
    printf (" --- definingDeclaration = %p \n",definingDeclaration);
    if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
      printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
    printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
    if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
      printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
    if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
    {
#if 0
      printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
      // DQ (10/19/2012): Use the defining declaration instead.
      // parent = firstNondefiningDeclaration;
      parent = definingDeclaration;
    }
  }
#if 0
  printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
  // debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
  // this will have to be revisited later since it appears clear that it is a problem for the binary analysis
  // work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
  // cycle, we don't exit when a cycle is identified (which is the point of the code below).
  // Note also that I have fixed the code (above and below) to only chase pointers through defining
  // declarations (where they exist), this is important since non-defining declarations can be almost
  // anywhere (and thus chasing them can make it appear that there are cycles where there are none
  // (I think); test2012_234.C demonstrates an example of this.

  // DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
  // if (foundCycle == true)
  if (foundCycle == false)
  {
    while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
    {
      ROSE_ASSERT(parent->get_parent() != parent);
#if 0
      printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
      if (parent->get_file_info() != NULL)
        parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
      // Declaration statements are redirected to their defining declaration
      // (when one exists) before stepping to the parent.
      SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
      if (declarationStatement != NULL)
      {
#if 0
        printf ("Found a SgDeclarationStatement \n");
#endif
        SgDeclarationStatement* definingDeclaration         = declarationStatement->get_definingDeclaration();
        SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
        printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
        printf (" --- definingDeclaration = %p \n",definingDeclaration);
        if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
          printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
        printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
        if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
          printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
        if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
        {
#if 0
          printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
          // DQ (10/19/2012): Use the defining declaration instead.
          // parent = firstNondefiningDeclaration;
          parent = definingDeclaration;
        }
      }

      parent = parent->get_parent();

#if 1
      // DQ (3/5/2012): Check for loops that will cause infinite loops.
      ROSE_ASSERT(parent != previouslySeenParent);
#else
      printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
      if (parent == previouslySeenParent)
        break;
#endif
    }
  }

  return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
  // DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).

  // Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
  // Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
  SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
  std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
  while (node) {
    if (NodeType *found = dynamic_cast<NodeType*>(node))
      return found;

    // FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
    ROSE_ASSERT(seen.insert(node).second);

    // Traverse to parent (declaration statements are a special case)
    if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
      SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
      SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
      if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
        // DQ (10/19/2012): Use the defining declaration instead.
        // node = firstNondefiningDeclaration;
        node = definingDeclaration;
      }
    } else {
      node = node->get_parent();
    }
  }
  return NULL;
#endif
}

//!
Find enclosing source file node ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false); //! Get the closest scope from astNode. Return astNode if it is already a scope. ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode); //! Get the enclosing scope from a node n ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false); //! Traverse back through a node's parents to find the enclosing global scope ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode); //! Find the function definition ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false); ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false); //! Find the closest enclosing statement, including the given node ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n); //! Find the closest switch outside a given statement (normally used for case and default statements) ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s); //! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false); //! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration. ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false); //roseSupport/utility_functions.h //! get the SgFile node from current node ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode ); //! Get the initializer containing an expression if it is within an initializer. ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n); //! 
Get the closest class definition enclosing the specified AST node, ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false); // TODO #if 0 SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL); std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL); SgVariableDeclaration* findVariableDeclaratin( const string& varname) SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode); // e.g. for some expression, find its parent statement SgStatement* getEnclosingStatement(const SgNode* astNode); SgSwitchStatement* getEnclosingSwitch(SgStatement* s); SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode); // used to build a variable reference for compiler generated code in current scope SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name); #endif //@} //------------------------------------------------------------------------ //@{ /*! @name AST Walk and Traversal \brief */ // Liao, 1/9/2008 /*! \brief return the first global scope under current project */ ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project); /*! \brief get the last statement within a scope, return NULL if it does not exit */ ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope); //! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers. ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false); //!Find the first defining function declaration statement in a scope ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope); //! Get next statement within the same scope of current statement ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt); //! 
Get previous statement within the same scope of current statement ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt); #if 0 //TODO // preorder traversal from current SgNode till find next SgNode of type V_SgXXX SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode); #endif //@} //------------------------------------------------------------------------ //@{ /*! @name AST Comparison \brief Compare AST nodes, subtree, etc */ //! Check if a SgIntVal node has a given value ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value); //! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same. /*! * There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C */ ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2); //! Check if a statement is the last statement within its closed scope ROSE_DLL_API bool isLastStatement(SgStatement* stmt); //@} //------------------------------------------------------------------------ //@{ /*! @name AST insert, removal, and replacement \brief Add, remove,and replace AST scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc. */ // DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining). //! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result. ROSE_DLL_API void deleteAST(SgNode* node); //! 
Special purpose function for deleting AST expression tress containing valid original expression trees in constant folded expressions (for internal use only). ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root); // DQ (2/25/2009): Added new function to support outliner. //! Move statements in first block to the second block (preserves order and rebuilds the symbol table). ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock ); //! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc. ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope); //! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc. ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL); //! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc. ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL); // DQ (2/6/2009): Added function to support outlining into separate file. //! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers). ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles ); //! Prepend a statement to the beginning of the current scope, handling side //! effects as appropriate ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL); //! 
prepend a list of statements to the beginning of the current scope, //! handling side effects as appropriate ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL); //! Check if a scope statement has a simple children statement list //! so insert additional statements under the scope is straightforward and unambiguous . //! for example, SgBasicBlock has a simple statement list while IfStmt does not. ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope); //! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true); //! Insert a list of statements before or after the target statement within the //target's scope ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true); //! Insert a statement before a target statement ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true); //! Insert a list of statements before a target statement ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts); //! Insert a statement after a target statement, Move around preprocessing info automatically by default ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true); //! Insert a list of statements after a target statement ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt); //! Insert a statement after the last declaration within a scope. 
The statement will be prepended to the scope if there is no declaration statement found ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope); //! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope); //! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements // then the statement is inserted at the end of the scope. ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope, bool movePreprocessingInfo=true); //! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements //then the new statements are inserted at the end of the scope. ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope); //! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()). ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true); //! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST() ROSE_DLL_API void deepDelete(SgNode* root); //! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested. ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false); //! 
Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node. ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern); //! Replace all variable references to an old symbol in a scope to being references to a new symbol. // Essentially replace variable a with b. ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope ); /** Given an expression, generates a temporary variable whose initializer optionally evaluates * that expression. Then, the var reference expression returned can be used instead of the original * expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp; * this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles * reference types correctly by using pointer types for the temporary. * @param expression Expression which will be replaced by a variable * @param scope scope in which the temporary variable will be generated * @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed * @return declaration of the temporary variable, and a a variable reference expression to use instead of * the original expression. */ std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression, SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL); /* This function creates a temporary variable for a given expression in the given scope This is different from SageInterface::createTempVariableForExpression in that it does not try to be smart to create pointers to reference types and so on. The tempt is initialized to expression. 
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage. @param expression Expression which will be replaced by a variable @param scope scope in which the temporary variable will be generated */ std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression (SgExpression* expression, SgScopeStatement* scope); //! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible /*! We recommend to build SgFunctionParameterList before building a function declaration However, it is still allowed to append new arguments for existing function declarations. \todo function type , function symbol also need attention. */ ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*); //!Prepend an argument to SgFunctionParameterList ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*); //! Append an expression to a SgExprListExp, set the parent pointer also ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*); //! Append an expression list to a SgExprListExp, set the parent pointers also ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&); //! Set parameter list for a function declaration, considering existing parameter list etc. // void setParameterList(SgFunctionDeclaration *func,SgFunctionParameterList *paralist); template <class actualFunction> ROSE_DLL_API void setParameterList(actualFunction *func,SgFunctionParameterList *paralist); # if 1 // DQ (11/25/2011): Moved to the header file so that it could be seen as a template function. 
// TODO consider the difference between C++ and Fortran // fixup the scope of arguments,no symbols for nondefining function declaration's arguments template <class actualFunction> void // SageInterface::setParameterList(SgFunctionDeclaration * func,SgFunctionParameterList * paralist) setParameterList(actualFunction* func, SgFunctionParameterList* paralist) { // DQ (11/25/2011): Modified this to be a templated function so that we can handle both // SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member // function derived classes). ROSE_ASSERT(func != NULL); ROSE_ASSERT(paralist != NULL); #if 0 // At this point we don't have cerr and endl defined, so comment this code out. // Warn to users if a paralist is being shared if (paralist->get_parent() !=NULL) { cerr << "Waring! Setting a used SgFunctionParameterList to function: " << (func->get_name()).getString()<<endl << " Sharing parameter lists can corrupt symbol tables!"<<endl << " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl; // ROSE_ASSERT(false); } #endif // Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when set new paralist!! if (func->get_parameterList() != NULL) { if (func->get_parameterList() != paralist) { delete func->get_parameterList(); } } func->set_parameterList(paralist); paralist->set_parent(func); // DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node. // This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C, // test2012_81.C and testcode2012_82.C demonstrate this problem. SgInitializedNamePtrList & args = paralist->get_args(); for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++) { (*i)->set_declptr(func); } } #endif //! Set a pragma of a pragma declaration. handle memory release for preexisting pragma, and set parent pointer. 
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma); //! Replace an expression with another, used for variable reference substitution and others. the old expression can be deleted (default case) or kept. ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false); //! Replace a given expression with a list of statements produced by a generator ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from, SageInterface::StatementGenerator* to); //! Similar to replaceExpressionWithStatement, but with more restrictions. //! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from, SageInterface::StatementGenerator* to); //! Set operands for expressions with single operand, such as unary expressions. handle file info, lvalue, pointer downcasting, parent pointer etc. ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand); //!set left hand operand for binary expressions, transparently downcasting target expressions when necessary ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs); //!set left hand operand for binary expression ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs); //! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly. ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top); // DQ (1/25/2010): Added support for directories //! Move file to be generated in a subdirectory (will be generated by the unparser). ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file ); //! Supporting function to comment relocation in insertStatement() and removeStatement(). 
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement); //! Relocate comments and CPP directives from one statement to another. ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement); //@} //------------------------------------------------------------------------ //@{ /*! @name AST repair, fix, and postprocessing. \brief Mostly used internally when some AST pieces are built without knowing their target scope/parent, especially during bottom-up construction of AST. The associated symbols, parent and scope pointers cannot be set on construction then. A set of utility functions are provided to patch up scope, parent, symbol for them when the target scope/parent become know. */ //! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed. /*! In AST translation, it is possible to build a variable reference before the variable is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders to get the work done. Users should call fixVariableReference() when AST is complete and all variable declarations are in place. */ ROSE_DLL_API int fixVariableReferences(SgNode* root); //!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known. /*! It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general. In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatment(), insertStatement(). */ ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope); //! 
Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a struct declaration was built without knowing its target scope. ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a class declaration was built without knowing its target scope. ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a namespace declaration was built without knowing its target scope. ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope); //! Fix symbol table for SgLabelStatement. Used Internally when the label is built without knowing its target scope. Both parameters cannot be NULL. ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope); //! Set a numerical label for a Fortran statement. The statement should have a enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed. ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value); //! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def); //! Fix the symbol table and set scope (only if scope in declaration is not already set). ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope); //! Fix the symbol table and set scope (only if scope in declaration is not already set). ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope); //! 
A wrapper containing fixes (fixVariableDeclaration(),fixStructDeclaration(), fixLabelStatement(), etc) for all kinds statements. Should be used before attaching the statement into AST. ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope); //@} //! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope. /*! This function not only set the defining and nondefining links of the newly introduced * function declaration inside a scope, but also update other same function declarations' links * accordingly if there are any. * Assumption: The function has already inserted/appended/prepended into the scope before calling this function. */ ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope); //------------------------------------------------------------------------ //@{ /*! @name Advanced AST transformations, analyses, and optimizations \brief Some complex but commonly used AST transformations. */ //! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++ ROSE_DLL_API bool collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false); //!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement. ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars); //!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement. 
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars); //!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement. ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols); //! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++ ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref); //! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++ ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB); #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT //!Call liveness analysis on an entire project ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false); //!get liveIn and liveOut variables for a for loop from liveness analysis result liv. ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts); #endif //!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types. ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, VariantT> > & results); //! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations! /*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. 
You have to call this upon an expression's parent node if you want to fold the expression. */ ROSE_DLL_API void constantFolding(SgNode* r); //!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements and return expressions with side effects. Return the number of statements inserted. /*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement. */ ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s); //! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments. ROSE_DLL_API void removeJumpsToNextStatement(SgNode*); //! Remove labels which are not targets of any goto statements ROSE_DLL_API void removeUnusedLabels(SgNode* top); //! Remove consecutive labels ROSE_DLL_API void removeConsecutiveLabels(SgNode* top); //! Replace an expression with a temporary variable and an assignment statement /*! Add a new temporary variable to contain the value of 'from' Change reference to 'from' to use this new variable Assumptions: 'from' is not within the test of a loop or 'if' not currently traversing 'from' or the statement it is in */ ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = ""); //! Split long expressions into blocks of statements ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr); //! Remove labeled goto statements ROSE_DLL_API void removeLabeledGotos(SgNode* top); //! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label. ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch); //! 
Check if the body of a 'for' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs); //! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs); //! Check if the body of a 'while' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws); //! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws); //! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws); //! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not. SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs); //! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not. SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs); //! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs); //! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true); //! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos); //! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt); //! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while, //! 
switch, If, Catch, OmpBodyStmt, etc bool isBodyStatement (SgStatement* s); //! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them. void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true); //! The same as changeAllBodiesToBlocks(SgNode* top). To be phased out. void changeAllLoopBodiesToBlocks(SgNode* top); //! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc. SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt); #if 0 /** If s is the body of a loop, catch, or if statement and is already a basic block, * s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent * (a loop, catch, or if statement, etc). */ SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s); #endif //! Get the constant value from a constant integer expression; abort on //! everything else. Note that signed long longs are converted to unsigned. unsigned long long getIntegerConstantValue(SgValueExp* expr); //! Get a statement's dependent declarations which declares the types used in the statement. The returned vector of declaration statements are sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will treated as a reference to the enclosing namespace. std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt ); //! Insert an expression (new_exp )before another expression (anchor_exp) has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned. SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp); //! Insert an expression (new_exp ) after another expression (anchor_exp) has possible side effects, without changing the original semantics. 
This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) )... , where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);

/// \brief  moves the body of a function f to a new function f`;
///         f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
///         and an initialized name referring to the temporary variable
///         holding the result of f`. In case f returns void
///         the initialized name is NULL.
/// \param  definingDeclaration the defining function declaration of f
/// \param  newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
///          for functions returning void and a value, respectively.
///          two function declarations are inserted in f's enclosing scope
/// \code
///          result_type f`(...);                  <--- (1)
///          result_type f (...) { forward call to f` }
///          result_type f`(...) { original code } <--- (2)
/// \endcode
///          Calls to f are not updated, thus in the transformed code all
///          calls will continue calling f (this is also true for
///          recursive function calls from within the body of f`).
///          After the function has created the wrapper,
///          definingDeclaration becomes the wrapper function
///          The definition of f` is the next entry in the
///          statement list; the forward declaration of f` is the previous
///          entry in the statement list.
/// \pre     definingDeclaration must be a defining declaration of a
///          free standing function.
///          typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
///          i.e., this function is NOT implemented for class member functions,
///          template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);

/// \overload
/// \tparam  NameGen functor that generates a new name based on the old name.
///          interface: SgName nameGen(const SgName&)
/// \param   nameGen name generator
/// \brief   see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
  // Thin forwarder: compute the new function name from the declaration's
  // current name, then delegate to the non-template overload above.
  return wrapFunction(definingDeclaration, nameGen(definingDeclaration.get_name()));
}

/// \brief convenience function that returns the first initialized name in a
///        list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);

//@}

// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );

//! Move a declaration to a scope which is the closest to the declaration's use places
bool moveDeclarationToInnermostScope(SgDeclarationStatement* decl, bool debug/*= false */);

// NOTE(review): everything from here to the matching #endif is disabled
// design notes / wish-list signatures, not compiled code.
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h

// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);

// or a friendly version of unparseToString(), as a member function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree

//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);

//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//!
Are corresponding expressions in two lists equal (using a deep comparison)? bool expressionTreeEqualStar(const SgExpressionPtrList&, const SgExpressionPtrList&); //----------------------AST verfication/repair---------------------------- //------------------------------------------------------------------------ // sanity check of AST subtree, any suggestions? // TODO verifySgNode(SgNode* node, bool subTree=true); //src/midend/astDiagnostics/AstConsistencyTests.h // AstTests::runAllTests(SgProject * ) //src/midend/astUtil/astInterface/AstInterface.h.C //FixSgProject(SgProject &project) //FixSgTree(SgNode* r) //src/frontend/SageIII/astPostProcessing //AstPostProcessing(SgNode * node) //--------------------------AST modification------------------------------ //------------------------------------------------------------------------ // any operations changing AST tree, including // insert, copy, delete(remove), replace // insert before or after some point, argument list is consistent with LowLevelRewrite void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true); // previous examples //void myStatementInsert(SgStatement* target,...) // void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock) // copy // copy children of one basic block to another basic block //void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b); void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst); // delete (remove) a node or a whole subtree void removeSgNode(SgNode* targetNode); // need this? void removeSgNodeTree(SgNode* subtree); // need this? void removeStatement( SgStatement* targetStmt); //Move = delete + insert void moveAst (SgNode* src, SgNode* target); // need this? 
// similar to void moveStatements (SgBasicBlock* src, SgBasicBlock* target); // replace= delete old + insert new (via building or copying) // DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE. // void replaceAst(SgNode* oldNode, SgNode* newNode); //void replaceChild(SgNode* parent, SgNode* from, SgNode* to); //bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n) //--------------------------AST transformations--------------------------- //------------------------------------------------------------------------ // Advanced AST modifications through basic AST modifications // Might not be included in AST utitlity list, but listed here for the record. // extract statements/content from a scope void flattenBlocks(SgNode* n); //src/midend/astInlining/inlinerSupport.h void renameVariables(SgNode* n); void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition); void simpleCopyAndConstantPropagation(SgNode* top); void changeAllMembersToPublic(SgNode* n); void removeVariableDeclaration(SgInitializedName* initname); //! Convert something like "int a = foo();" into "int a; a = foo();" SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init); //! Rewrites a while or for loop so that the official test is changed to //! "true" and what had previously been the test is now an if-break //! combination (with an inverted condition) at the beginning of the loop //! body void pushTestIntoBody(LoopStatement* loopStmt); //programTransformation/finiteDifferencing/finiteDifferencing.h //! Move variables declared in a for statement to just outside that statement. void moveForDeclaredVariables(SgNode* root); //------------------------ Is/Has functions ------------------------------ //------------------------------------------------------------------------ // misc. 
boolean functions // some of them could moved to SgXXX class as a member function bool isOverloaded (SgFunctionDeclaration * functionDeclaration); bool isSwitchCond (const SgStatement* s); bool isIfCond (const SgStatement* s); bool isWhileCond (const SgStatement* s); bool isStdNamespace (const SgScopeStatement* scope); bool isTemplateInst (const SgDeclarationStatement* decl); bool isCtor (const SgFunctionDeclaration* func); bool isDtor (const SgFunctionDeclaration* func); // src/midend/astInlining/typeTraits.h bool hasTrivialDestructor(SgType* t); ROSE_DLL_API bool isNonconstReference(SgType* t); ROSE_DLL_API bool isReferenceType(SgType* t); // generic ones, or move to the SgXXX class as a member function bool isConst(SgNode* node); // const type, variable, function, etc. // .... and more bool isConstType (const SgType* type); bool isConstFunction (const SgFunctionDeclaration* decl); bool isMemberVariable(const SgInitializedName & var); //bool isMemberVariable(const SgNode& in); bool isPrototypeInScope (SgScopeStatement * scope, SgFunctionDeclaration * functionDeclaration, SgDeclarationStatement * startingAtDeclaration); bool MayRedefined(SgExpression* expr, SgNode* root); // bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h bool hasAddressTaken(SgExpression* expr, SgNode* root); //src/midend/astInlining/inlinerSupport.C // can also classified as topdown search bool containsVariableReference(SgNode* root, SgInitializedName* var); bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var); bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc, SgInitializedName* toCheck, SgInitializedName* lifetime) //src/midend/programTransformation/partialRedundancyElimination/pre.h bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n); //------------------------ loop handling --------------------------------- //------------------------------------------------------------------------ //get and 
set loop control expressions // 0: init expr, 1: condition expr, 2: stride expr SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt ); int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp); bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref); SgInitializedName * getLoopIndexVar(SgForStatement* forstmt); //------------------------expressions------------------------------------- //------------------------------------------------------------------------ //src/midend/programTransformation/partialRedundancyElimination/pre.h int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root); //src/midend/astInlining/replaceExpressionWithStatement.h void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to); void replaceSubexpressionWithStatement(SgExpression* from, StatementGenerator* to); SgExpression* getRootOfExpression(SgExpression* n); //--------------------------preprocessing info. ------------------------- //------------------------------------------------------------------------ //! Removes all preprocessing information at a given position. void cutPreprocInfo (SgBasicBlock* b, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf); //! Pastes preprocessing information at the front of a statement. void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf, SgStatement* s); //! Pastes preprocessing information at the back of a statement. void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf, SgStatement* s); /*! * \brief Moves 'before' preprocessing information. * Moves all preprocessing information attached 'before' the source * statement to the front of the destination statement. 
*/ // a generic one for all /// void movePreprocessingInfo(src, dest, RelativePositionType); void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest); void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest); void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest); //--------------------------------operator-------------------------------- //------------------------------------------------------------------------ from transformationSupport.h, not sure if they should be included here /* return enum code for SAGE operators */ operatorCodeType classifyOverloadedOperator(); // transformationSupport.h /*! \brief generates a source code string from operator name. This function returns a string representing the elementwise operator (for primative types) that would be match that associated with the overloaded operator for a user-defined abstractions (e.g. identifyOperator("operator+()") returns "+"). */ std::string stringifyOperator (std::string name); //--------------------------------macro ---------------------------------- //------------------------------------------------------------------------ std::string buildMacro ( std::string s ); //transformationSupport.h //--------------------------------access functions--------------------------- //----------------------------------get/set sth.----------------------------- // several categories: * get/set a direct child/grandchild node or fields * get/set a property flag value * get a descendent child node using preorder searching * get an ancestor node using bottomup/reverse searching // SgName or string? std::string getFunctionName (SgFunctionCallExp* functionCallExp); std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression ); // do we need them anymore? or existing member functions are enought? 
// a generic one: std::string get_name (const SgNode* node); std::string get_name (const SgDeclarationStatement * declaration); // get/set some property: should moved to SgXXX as an inherent memeber function? // access modifier void setExtern (SgFunctionDeclartion*) void clearExtern() // similarly for other declarations and other properties void setExtern (SgVariableDeclaration*) void setPublic() void setPrivate() #endif // DQ (1/23/2013): Added support for generated a set of source sequence entries. std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode ); //--------------------------------Type Traits (C++)--------------------------- bool HasNoThrowAssign(const SgType * const inputType); bool HasNoThrowCopy(const SgType * const inputType); bool HasNoThrowConstructor(const SgType * const inputType); bool HasTrivialAssign(const SgType * const inputType); bool HasTrivialCopy(const SgType * const inputType); bool HasTrivialConstructor(const SgType * const inputType); bool HasTrivialDestructor(const SgType * const inputType); bool HasVirtualDestructor(const SgType * const inputType); bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType); bool IsAbstract(const SgType * const inputType); bool IsClass(const SgType * const inputType); bool IsEmpty(const SgType * const inputType); bool IsEnum(const SgType * const inputType); bool IsPod(const SgType * const inputType); bool IsPolymorphic(const SgType * const inputType); bool IsStandardLayout(const SgType * const inputType); bool IsLiteralType(const SgType * const inputType); bool IsTrivial(const SgType * const inputType); bool IsUnion(const SgType * const inputType); SgType * UnderlyingType(SgType *type); // DQ (3/2/2014): Added a new interface function (used in the snippet insertion support). 
void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );

// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );

// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */
struct const_int_expr_t {
  size_t value_;  // evaluated value; meaningful only when hasValue_ is true
  bool hasValue_; // true iff the expression could be evaluated to a constant
};

// Result type for evaluateConstNumericExpression(); check hasValue_ before
// reading value_.  isIntOnly_ presumably flags purely-integral expressions
// (value stored in the double without rounding) — TODO confirm in the .C file.
struct const_numeric_expr_t {
  bool hasValue_;
  bool isIntOnly_;
  double value_;
};

/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
struct const_numeric_expr_t evaluateConstNumericExpression(SgExpression *expr);

// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);

//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *,
SgClassDefinition *package_definition, std::string); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *); ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *); ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *); #endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT }// end of namespace #endif
log4qramp.c
#include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>

/* Element access for a 1-D double array: a->data + i*a->strides[0].
   NOTE(review): uses the legacy direct struct access (a->data); kept for
   consistency with the sibling ramp-model extensions in this package. */
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))

static PyObject *log4qramp(PyObject *self, PyObject *args, PyObject *keywds);

/* Evaluate the quartic-log + quadratic ramp model (see module_docstring).

   rampparams = [x0, a, b, c, d, e, f, g, x1]; x = array of time/phase
   points.  Returns a new double array y with y[i] = 1 for x[i] <= x0 and
   a*L^4 + b*L^3 + c*L^2 + d*L + e*(x-x1)^2 + f*(x-x1) + g otherwise,
   where L = log(x[i]-x0). */
static PyObject *log4qramp(PyObject *self, PyObject *args, PyObject *keywds)
{
  PyObject *etc = NULL;   /* optional third argument, currently unused */
  PyArrayObject *x,*y,*rampparams;
  double x0,a,b,c,d,e,f,g,x1;
  npy_intp i;             /* npy_intp (not int): matches dims[0], no overflow */
  npy_intp dims[1];

  static char *kwlist[] = {"rampparams","x","etc",NULL};

  if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
    {
      return NULL;
    }

  x0 = IND(rampparams,0);
  a  = IND(rampparams,1);
  b  = IND(rampparams,2);
  c  = IND(rampparams,3);
  d  = IND(rampparams,4);
  e  = IND(rampparams,5);
  f  = IND(rampparams,6);
  g  = IND(rampparams,7);
  x1 = IND(rampparams,8);

  dims[0] = x->dimensions[0];

  y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
  if (y == NULL)
    {
      /* FIX: allocation failure previously went unchecked and the loop
         below would have dereferenced NULL; propagate the error instead. */
      return NULL;
    }

  #pragma omp parallel for
  for(i=0;i<dims[0];i++)
    {
      double xi = IND(x,i);
      if(xi<=x0)
        {
          IND(y,i) = 1;
        }
      else
        {
          /* Hoist shared subexpressions: log(xi-x0) was previously
             recomputed four times per element.  Same math, same result. */
          double lx = log(xi-x0);
          double dx = xi-x1;
          IND(y,i) = a*pow(lx,4)+b*pow(lx,3)+c*pow(lx,2)+d*lx
                    +e*pow(dx,2)+f*dx+g;
        }
    }
  return PyArray_Return(y);
}

static char module_docstring[]="\
This function creates a model that fits a ramp using quartic-log + quadratic polynomial.\n\
\n\
Parameters\n\
----------\n\
x0: phase offset for log\n\
a: log(x)^4 term\n\
b: log(x)^3 term\n\
c: log(x)^2 term\n\
d: log(x) term\n\
e: quadratic term\n\
f: linear term\n\
g: constant term\n\
x1: phase offset for polynomial\n\
x: Array of time/phase points\n\
\n\
Returns\n\
-------\n\
This function returns the flux values for the ramp models\n\
\n\
Revisions\n\
---------\n\
2009-11-28 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2011-01-05 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to c extention function\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";

static PyMethodDef
module_methods[] = { {"log4qramp",(PyCFunction)log4qramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}}; PyMODINIT_FUNC #if PY_MAJOR_VERSION >= 3 PyInit_log4qramp(void) #else initlog4qramp(void) #endif { #if PY_MAJOR_VERSION >= 3 PyObject *module; static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "log4qramp", /* m_name */ module_docstring, /* m_doc */ -1, /* m_size */ module_methods, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL, /* m_free */ }; #endif #if PY_MAJOR_VERSION >= 3 module = PyModule_Create(&moduledef); if (!module) return NULL; /* Load `numpy` functionality. */ import_array(); return module; #else PyObject *m = Py_InitModule3("log4qramp", module_methods, module_docstring); if (m == NULL) return; /* Load `numpy` functionality. */ import_array(); #endif }
task_parallel.c
#include <stdio.h>
#include <errno.h>  // for errno
#include <math.h>
#include <limits.h> // for INT_MAX
#include <stdlib.h> // for strtol, rand, RAND_MAX
#include <time.h>
#include <omp.h>

// Number of OpenMP threads used by multiply_matrix(); set from argv[1].
int numThreads = 1;

// Free a matrix allocated by allocMatrix() (lins row pointers + spine).
void freeMatrix(double** matrix, long lins){
    for (long i = 0; i < lins; ++i) {
        free(matrix[i]);
    }
    free(matrix);
}

// Allocate a lins x cols matrix as an array of row pointers.
// FIX: malloc results were previously unchecked; a failed allocation was
// later dereferenced in fillMatrix/multiply.  Abort with a message instead.
double** allocMatrix(long lins, long cols){
    double** matrix = malloc(lins*sizeof(double*));
    if (matrix == NULL) {
        fprintf(stderr, "malloc failed\n");
        exit(-1);
    }
    for (long i = 0; i < lins; ++i) {
        matrix[i] = malloc(cols*sizeof(double));
        if (matrix[i] == NULL) {
            fprintf(stderr, "malloc failed\n");
            exit(-1);
        }
    }
    return matrix;
}

// Print a matrix, one row per line, followed by a blank line.
void printMatrix(double** matrix, long lins, long cols){
    for (long i = 0; i < lins; ++i) {
        for (long j = 0; j < cols; ++j)
            printf("%lf ", matrix[i][j]);
        printf("\n");
    }
    printf("\n");
}

// Fill a matrix with pseudo-random doubles in [0, 1].
// FIX: divide by RAND_MAX (the actual upper bound of rand()) rather than
// INT_MAX; the two coincide on glibc but not on all platforms, where the
// old code produced values far below 1.
void fillMatrix(double** matrix, long lins, long cols, long seed){
    srand(seed);
    for (long i = 0; i < lins; ++i) {
        for (long j = 0; j < cols; ++j) {
            matrix[i][j] = (double) rand() / RAND_MAX;
        }
    }
}

// Compute one row of the product: result = linA * B (row-vector times matrix).
void multiply_row(double* linA, double** B, double* result, long colsB, long size){
    for (long j = 0; j < colsB; ++j) {
        result[j] = 0;
        for (long k = 0; k < size; ++k){
            result[j] += linA[k] * B[k][j];
        }
    }
}

// Parallel matrix product: one OpenMP task per row of the result.
// i is private inside the single region, so each task captures its own
// value (firstprivate by default for task constructs).
void multiply_matrix(double** A, double** B, double** result, long linsA, long colsB, long size){
    #pragma omp parallel num_threads(numThreads) default(none) \
        shared(linsA, numThreads, A, B, result, colsB, size)
    {
        #pragma omp single
        {
            for (long i = 0; i < linsA; ++i) {
                #pragma omp task
                multiply_row(A[i], B, result[i], colsB, size);
            }
        }
    }
}

// Strict string-to-long conversion; exits with a message on any trailing
// garbage or out-of-range input (errno set by strtol).
long convert_str_long(char *str){
    char *p;
    errno = 0;
    long conv = strtol(str, &p, 10);
    if (errno != 0 || *p != '\0') {
        printf("%s não é um número!\n", str);
        exit(-1);
    }
    return (long)conv;
}

// Usage: threads show_matrix seedA seedB linsA colsA linsB colsB
// Prints the wall-clock time (allocation + fill + multiply) and, when
// show_matrix == 1, the three matrices.
int main(int argc, char **argv){
    if (argc != 9) {
        printf("É necessário informar os seguintes argumentos:\nO número de threads a serem usadas\nSe as matrizes devem ser exibidas\nSeed para gerar a matriz A\nSeed para gerar a matriz B\nNúmero de linhas de A\nNúmero de colunas de A\nNúmero de linhas de B\nNúmero de colunas de B\n");
        return -1;
    }

    numThreads = convert_str_long(argv[1]);
    int show_matrix = convert_str_long(argv[2]);
    long seedA = convert_str_long(argv[3]);
    long seedB = convert_str_long(argv[4]);
    long linsA = convert_str_long(argv[5]);
    long colsA = convert_str_long(argv[6]);
    long linsB = convert_str_long(argv[7]);
    long colsB = convert_str_long(argv[8]);

    if(colsA != linsB){
        printf("Número de colunas de A é diferente do número de linhas de B, multiplicação não é possivel.\n");
        return -1;
    }

    double t = omp_get_wtime();

    double** A = allocMatrix(linsA, colsA);
    double** B = allocMatrix(linsB, colsB);
    double** R = allocMatrix(linsA, colsB);

    fillMatrix(A, linsA, colsA, seedA);
    fillMatrix(B, linsB, colsB, seedB);

    multiply_matrix(A, B, R, linsA, colsB, colsA);

    t = omp_get_wtime() - t;
    printf("%.10lf\n", t);

    if(show_matrix == 1){
        printMatrix(A, linsA, colsA);
        printMatrix(B, linsB, colsB);
        printMatrix(R, linsA, colsB);
    }

    freeMatrix(A, linsA);
    freeMatrix(B, linsB);
    freeMatrix(R, linsA);

    return 0;
} /* main */
GB_binop__bshift_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bshift_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__bshift_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bshift_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__bshift_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bshift_int16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bshift_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bshift_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bshift_int16)
// C=scalar+B                       GB (_bind1st__bshift_int16)
// C=scalar+B'                      GB (_bind1st_tran__bshift_int16)
// C=A+scalar                       GB (_bind2nd__bshift_int16)
// C=A'+scalar                      GB (_bind2nd_tran__bshift_int16)

// C type:   int16_t
// A type:   int16_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = GB_bitshift_int16 (aij, bij)
// (the value aij is combined with the int8_t shift count bij via the
// GB_bitshift_int16 helper)

// The macros below configure the shared template files #include'd by the
// functions later in this file.

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_int16 (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags are compile-time controls; presumably defined in
// GB_control.h, included above)
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_INT16 || GxB_NO_BSHIFT_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bshift_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bshift_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bshift_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t 
*restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bshift_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bshift_int16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bshift_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bshift_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bshift_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bshift_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_bitshift_int16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bshift_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_bitshift_int16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_int16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__bshift_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_int16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__bshift_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
BackpropagatedBatchNormalization.h
// --------------------------------------------------------------------------
//  Binary Brain  -- binary neural net framework
//
//                                Copyright (C) 2018 by Ryuji Fuchikami
//                                https://github.com/ryuz
//                                ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------

#pragma once

#include "bb/Manager.h"
#include "bb/DataType.h"
#include "bb/Model.h"
#include "bb/FrameBuffer.h"
#include "bb/SimdSupport.h"

#ifdef BB_WITH_CUDA
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
#endif

namespace bb {

// BatchNormalization
//
// "Backpropagated" batch normalization: the forward pass is the identity
// (input is passed through untouched); the normalization statistics are
// computed only in the Backward pass, where a per-node correction term
// (x - t) * m_gain is ADDED to the incoming gradient dy.  m_gain is
// decayed by m_beta after every backward step, so the correction fades
// out as training progresses.
template <typename T = float>
class BackpropagatedBatchNormalization : public Model
{
    using _super = Model;

protected:
    bool        m_host_only = false;    // when true, never use the CUDA path
    bool        m_host_simd = true;     // when true, allow the host SIMD path

    indices_t   m_node_shape;           // shape of one frame's nodes

    T           m_gain = (T)1.00;       // current strength of the normalization correction
    T           m_beta = (T)0.99;       // per-step multiplicative decay applied to m_gain

public:
    // Construction parameters (see member comments above).
    struct create_t
    {
        T   gain = (T)1.00;
        T   beta = (T)0.99;
    };

protected:
    // Constructor is protected; use Create() factories below.
    BackpropagatedBatchNormalization(create_t const &create)
    {
        m_gain = create.gain;
        m_beta = create.beta;
    }

    /**
     * @brief  Runtime command processing.
     * @detail Accepts "host_only <bool>" and "host_simd <bool>" commands
     *         to steer the execution backend selection.
     */
    void CommandProc(std::vector<std::string> args)
    {
        // "HostOnly" mode setting
        if (args.size() == 2 && args[0] == "host_only")
        {
            m_host_only = EvalBool(args[1]);
        }

        // Host SIMD mode setting
        if (args.size() == 2 && args[0] == "host_simd")
        {
            m_host_simd = EvalBool(args[1]);
        }
    }

public:
    ~BackpropagatedBatchNormalization() {}

    // Factory: build from a create_t parameter struct.
    static std::shared_ptr<BackpropagatedBatchNormalization> Create(create_t const &create)
    {
        return std::shared_ptr<BackpropagatedBatchNormalization>(new BackpropagatedBatchNormalization(create));
    }

    // Factory: build directly from gain/beta values.
    static std::shared_ptr<BackpropagatedBatchNormalization> Create(T gain = (T)1.00, T beta = (T)0.99)
    {
        create_t create;
        create.gain = gain;
        create.beta = beta;
        return Create(create);
    }

    std::string GetModelName(void) const { return "BackpropagatedBatchNormalization"; }

    // Serialize: binary stream save/load of shape and hyper-parameters.
    void Save(std::ostream &os) const 
    {
        SaveIndices(os, m_node_shape);
        bb::SaveValue(os, m_gain);
        bb::SaveValue(os, m_beta);
    }

    void Load(std::istream &is)
    {
        m_node_shape = LoadIndices(is);
        bb::LoadValue(is, m_gain);
        bb::LoadValue(is, m_beta);
    }

#ifdef BB_WITH_CEREAL
    // cereal versioned serialization (mirrors Save/Load above).
    template <class Archive>
    void save(Archive& archive, std::uint32_t const version) const
    {
        _super::save(archive, version);
        archive(cereal::make_nvp("node_shape", m_node_shape));
        archive(cereal::make_nvp("gain",       m_gain));
        archive(cereal::make_nvp("beta",       m_beta));
    }

    template <class Archive>
    void load(Archive& archive, std::uint32_t const version)
    {
        _super::load(archive, version);
        archive(cereal::make_nvp("node_shape", m_node_shape));
        archive(cereal::make_nvp("gain",       m_gain));
        archive(cereal::make_nvp("beta",       m_beta));
    }

    void Save(cereal::JSONOutputArchive& archive) const
    {
        archive(cereal::make_nvp("BackpropagatedBatchNormalization", *this));
    }

    void Load(cereal::JSONInputArchive& archive)
    {
        archive(cereal::make_nvp("BackpropagatedBatchNormalization", *this));
    }
#endif

    /**
     * @brief  Set the input shape.
     * @detail Stores the shape; afterwards GetOutputShape() returns it.
     *         NOTE(review): the original (translated) doc claimed internals
     *         are re-initialized even when the same shape is passed, but the
     *         code below returns early and does nothing in that case —
     *         confirm which behavior is intended.
     * @param  shape  shape of the nodes making up one frame
     * @return the output shape (identical to the input shape)
     */
    indices_t SetInputShape(indices_t shape)
    {
        // Do nothing if this shape is already set
        if ( shape == this->GetInputShape() ) {
            return this->GetOutputShape();
        }

        m_node_shape = shape;
        return shape;
    }

    /**
     * @brief  Get the input shape.
     * @return the input shape
     */
    indices_t GetInputShape(void) const
    {
        return m_node_shape;
    }

    /**
     * @brief  Get the output shape (same as the input shape for this layer).
     * @return the output shape
     */
    indices_t GetOutputShape(void) const
    {
        return m_node_shape;
    }

public:
    /**
     * @brief  Get trainable parameters (for use by an Optimizer).
     * @detail This layer has no trainable parameters, so an empty set is returned.
     * @return empty Variables
     */
    Variables GetParameters(void)
    {
        Variables parameters;
        return parameters;
    }

    /**
     * @brief  Get gradients (for use by an Optimizer).
     * @detail No parameters, hence no gradients; returns an empty set.
     * @return empty Variables
     */
    Variables GetGradients(void)
    {
        Variables gradients;
        return gradients;
    }

    // Per-node forward computation: identity pass-through.
    std::vector<double> ForwardNode(index_t node, std::vector<double> x_vec) const
    {
        return x_vec;
    }

    /**
     * @brief  forward operation (identity).
     * @detail The input is returned unchanged; when training, the input
     *         frame buffer is saved so Backward() can compute statistics.
     * @param  x      input data
     * @param  train  set true during training
     * @return forward result (== x)
     */
    FrameBuffer Forward(FrameBuffer x_buf, bool train=true)
    {
        // save for backward
        if ( train ) {
            this->PushFrameBuffer(x_buf);
        }

        return x_buf;
    }

    /**
     * @brief  backward operation.
     * @detail For each node, computes mean/variance of the saved forward
     *         input over the frame axis, maps each x to a normalized target
     *         t = 0.2 * (x - mean)/std + 0.5, and emits
     *         dx = dy + (x - t) * m_gain.  m_gain is then decayed by m_beta.
     * @return backward result (dx)
     */
    FrameBuffer Backward(FrameBuffer dy_buf)
    {
        if (dy_buf.Empty()) {
            return dy_buf;
        }

        // Bypass once the gain has decayed to a negligible value
        if (m_gain <= (T)1.0e-14) {
            return dy_buf;
        }

        FrameBuffer x_buf = this->PopFrameBuffer();

        // allocate output
        FrameBuffer dx_buf(dy_buf.GetFrameSize(), dy_buf.GetShape(), dy_buf.GetType());

        {
            auto node_size  = dy_buf.GetNodeSize();
            auto frame_size = dy_buf.GetFrameSize();

            auto x_ptr  = x_buf.LockConst<T>();
            auto dy_ptr = dy_buf.LockConst<T>();
            auto dx_ptr = dx_buf.Lock<T>(true);

            // nodes are independent — parallelize over nodes
            #pragma omp parallel for
            for (index_t node = 0; node < node_size; ++node) {
                // mean over frames
                T mean = 0;
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    mean += x_ptr.Get(frame, node);
                }
                mean /= frame_size;

                // (biased) variance over frames
                T var = 0;
                for (index_t frame = 0; frame < frame_size; ++frame) {
                    auto d = x_ptr.Get(frame, node) - mean;
                    var += d * d;
                }
                var /= frame_size;

                // NOTE(review): local named 'std' shadows namespace std below this point
                T std = std::sqrt(var);

                for (index_t frame = 0; frame < frame_size; ++frame) {
                    auto x = x_ptr.Get(frame, node);
                    // epsilon guard; NOTE(review): 10e-7 == 1e-6 — confirm 1e-7 wasn't intended
                    auto t = (x - mean) / (std + (T)10e-7);
                    t = (t * (T)0.2) + (T)0.5;
                    auto dy = dy_ptr.Get(frame, node);
                    dx_ptr.Set(frame, node, dy + (x - t) * m_gain);
                }
            }

            // gain decay
            m_gain *= m_beta;

            return dx_buf;
        }
    }
};

}
kthread_hash_realign.c
#include "bbhashdict.h" void constructdictionary_realign(std::bitset<2*readlen> *read, bbhashdict *dict) { // std::bitset<2*readlen> mask[numdict_s]; std::bitset<2*readlen> *mask = (std::bitset<2*readlen>*)alloca(numdict_s * sizeof(std::bitset<2*readlen>)); generateindexmasks(mask, numdict_s); double mm_realtime0; // fprintf(stderr, "begin constructdictionary_realign()...\n"); for(int j = 0; j < numdict_s; j++) { uint64_t *ull = new uint64_t[numreads]; // fprintf(stderr, "*** parallel begin ***\n"); mm_realtime0 = realtime(); #pragma omp parallel { std::bitset<2*readlen> b; int tid = omp_get_thread_num(); std::ofstream foutkey(outdir+uuid+std::string("keys.bin.")+std::to_string(tid),std::ios::binary); uint32_t i, stop; i = uint64_t(tid)*numreads/omp_get_num_threads(); stop = uint64_t(tid+1)*numreads/omp_get_num_threads(); if(tid == omp_get_num_threads()-1) stop = numreads; //compute keys and write to file and store in ull for(; i < stop; i++) { b = read[i]&mask[j]; ull[i] = (b>>2*dict_start[j]).to_ullong(); foutkey.write((char*)&ull[i], sizeof(uint64_t)); } foutkey.close(); }//parallel end // fprintf(stderr, "*** parallel end ***\n"); // if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0)); // mm_realtime0 = realtime(); //deduplicating ull std::sort(ull, ull+numreads); uint32_t k = 0; for (uint32_t i = 1; i < numreads; i++) if (ull[i] != ull[k]) ull[++k] = ull[i]; dict[j].numkeys = k+1; //construct mphf auto data_iterator = boomphf::range(static_cast<const u_int64_t*>(ull), static_cast<const u_int64_t*>(ull+dict[j].numkeys)); double gammaFactor = 5.0;//balance between speed and memory dict[j].bphf = new boomphf::mphf<u_int64_t,hasher_t>(dict[j].numkeys,data_iterator,n_threads,gammaFactor,true,false); delete[] ull; // if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0)); // mm_realtime0 = 
realtime(); //compute hashes for all reads #pragma omp parallel { int tid = omp_get_thread_num(); std::ifstream finkey(outdir+uuid+std::string("keys.bin.")+std::to_string(tid),std::ios::binary); std::ofstream fouthash(outdir+uuid+std::string("hash.bin.")+std::to_string(tid)+'.'+std::to_string(j),std::ios::binary); uint64_t currentkey, currenthash; uint32_t i, stop; i = uint64_t(tid)*numreads/omp_get_num_threads(); stop = uint64_t(tid+1)*numreads/omp_get_num_threads(); if(tid == omp_get_num_threads()-1) stop = numreads; for(; i < stop; i++) { finkey.read((char*)&currentkey, sizeof(uint64_t)); currenthash = (dict[j].bphf)->lookup(currentkey); fouthash.write((char*)&currenthash, sizeof(uint64_t)); } finkey.close(); remove((outdir+uuid+std::string("keys.bin.")+std::to_string(tid)).c_str()); fouthash.close(); }//parallel end } // fprintf(stderr, "middle constructdictionary_realign()...\n"); // if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0)); // mm_realtime0 = realtime(); // omp_set_num_threads(std::min(numdict_s,n_threads)); #pragma omp parallel { #pragma omp for for (int j = 0; j < numdict_s; ++j) { //fill startpos by first storing numbers and then doing cumulative sum dict[j].startpos = new uint32_t[dict[j].numkeys+1]();//1 extra to store end pos of last key uint64_t currenthash; for(int tid = 0; tid < n_threads; tid++) { std::ifstream finhash(outdir+uuid+std::string("hash.bin.")+std::to_string(tid)+'.'+std::to_string(j),std::ios::binary); finhash.read((char*)&currenthash,sizeof(uint64_t)); while(!finhash.eof()) { dict[j].startpos[currenthash+1]++; finhash.read((char*)&currenthash,sizeof(uint64_t)); } finhash.close(); } dict[j].empty_bin = new bool[dict[j].numkeys](); for(uint32_t i = 1; i < dict[j].numkeys; i++) dict[j].startpos[i] = dict[j].startpos[i] + dict[j].startpos[i-1]; // if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - 
mm_realtime0, cputime() / (realtime() - mm_realtime0)); // mm_realtime0 = realtime(); //insert elements in the dict array dict[j].read_id = new uint32_t[numreads]; uint32_t i = 0; for(int tid = 0; tid < n_threads; tid++) { std::ifstream finhash(outdir+uuid+std::string("hash.bin.")+std::to_string(tid)+'.'+std::to_string(j),std::ios::binary); finhash.read((char*)&currenthash,sizeof(uint64_t)); while(!finhash.eof()) { dict[j].read_id[dict[j].startpos[currenthash]++] = i; i++; finhash.read((char*)&currenthash,sizeof(uint64_t)); } finhash.close(); remove((outdir+uuid+std::string("hash.bin.")+std::to_string(tid)+'.'+std::to_string(j)).c_str()); } // if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] \n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0)); // mm_realtime0 = realtime(); //correcting startpos array modified during insertion for(int64_t i = dict[j].numkeys; i >= 1 ; i--) dict[j].startpos[i] = dict[j].startpos[i-1]; dict[j].startpos[0] = 0; }//for end }//parallel end // fprintf(stderr, "end constructdictionary_realign()...\n"); return; } void setglobalarrays_realign() { chartorevchar['A'] = 'T'; chartorevchar['C'] = 'G'; chartorevchar['G'] = 'C'; chartorevchar['T'] = 'A'; chartoint['A'] = 0; chartoint['C'] = 1; chartoint['G'] = 2; chartoint['T'] = 3; // if (readlen > 50) { int len_t = 17; if (readlen <= 80) len_t = 11; // int len_t = 31; // int len_t = reads->k; numdict_s = readlen / len_t; /*if (numdict_s <= 3) { maxsearch = 2000; len_t = readlen / 3; numdict_s = readlen / len_t; }*/ // if (numdict_s > 4) { // numdict_s = 4; // } if (ininumdict > 1 && ininumdict < numdict_s) { numdict_s = ininumdict; } // numdict_s = 3; dict_start = new int[numdict_s]; dict_end = new int[numdict_s]; /*dict_start[0] = dict1_start; dict_end[0] = dict1_end; dict_start[1] = dict2_start; dict_end[1] = dict2_end;*/ /*dict_start[0] = 0; dict_end[0] = 20; dict_start[1] = 21; dict_end[1] = 41;*/ /*dict_start[0] = 0; dict_end[0] = 15; dict_start[1] = 
16; dict_end[1] = 31*/; /* dict_start[1] = 21; dict_end[1] = 41; dict_start[2] = 42; dict_end[2] = 62; */ if (ininumdict > 0 && ininumdict < numdict_s) { dict_start[0] = readlen/2 - (len_t * numdict_s)/2; } else { dict_start[0] = 0; } dict_end[0] = dict_start[0] + len_t - 1; for (int i = 1; i < numdict_s; ++i) { dict_start[i] = dict_end[i-1] + 1; dict_end[i] = dict_start[i] + len_t - 1; } // for (int i = 0; i < numdict_s; ++i) { // fprintf(stderr, "%d, %d\n", dict_start[i], dict_end[i]); // } // fprintf(stderr, "----------------\n"); /*dict_start[0] = 0; dict_end[0] = 19; for (int i = 1; i < numdict_s; ++i) { dict_start[i] = dict_end[i-1] + 1; dict_end[i] = dict_start[i] + 20; }*/ /*dict_start[0] = 0; dict_end[0] = 31; dict_start[1] = 32; dict_end[1] = 63;*/ // #define dict1_start 18 // #define dict1_end 49 // #define dict2_start 50 // #define dict2_end 81 // dict_start[0] = 18; // dict_end[0] = 47; // dict_start[1] = 50; // dict_end[1] = 79; // } else { // numdict_s = 2; // dict_start = new int[numdict_s]; // dict_end = new int[numdict_s]; // dict_start[0] = 0; // dict_end[0] = 20*readlen/50; // dict_start[1] = 20*readlen/50 + 1; // dict_end[1] = 41*readlen/50; // } for(int i = 0; i < 64; i++) mask64[i] = 1; // std::bitset<2*readlen> basemask[readlen][128] // std::bitset<2*readlen> positionmask[readlen] basemask = (std::bitset<2*readlen>**)calloc(readlen, sizeof(std::bitset<2*readlen>*)); positionmask = (std::bitset<2*readlen>*)calloc(readlen, sizeof(std::bitset<2*readlen>)); for(int i = 0; i < readlen; i++) { basemask[i] = (std::bitset<2*readlen>*)calloc(128, sizeof(std::bitset<2*readlen>)); basemask[i]['A'][2*i] = 0; basemask[i]['A'][2*i+1] = 0; basemask[i]['C'][2*i] = 0; basemask[i]['C'][2*i+1] = 1; basemask[i]['G'][2*i] = 1; basemask[i]['G'][2*i+1] = 0; basemask[i]['T'][2*i] = 1; basemask[i]['T'][2*i+1] = 1; positionmask[i][2*i] = 1; positionmask[i][2*i+1] = 1; } return; } struct kt_realign_hash_for_t; typedef struct { struct kt_realign_hash_for_t *t; long i, 
n; //i < n; // int tid; } ktf_realign_hash_worker_t; typedef struct kt_realign_hash_for_t { int n_threads, index, threshold;//win is the length of window ktf_realign_hash_worker_t *w; reads_t *reads; std::bitset<2*readlen> *read, *mask, *revmask, *mask1; bbhashdict *dict; pthread_mutex_t *dict_lock; pthread_mutex_t *read_lock; } kt_realign_hash_for_t; bool encode_byte(char *seq, char *ref, int pos, int dir) { char *temp_str = (char*)alloca((readlen + 1) * sizeof(char)); char *en_str = (char*)alloca((readlen + 1) * sizeof(char)); char *int_str = (char*)alloca(10 * sizeof(char)); strcpy(temp_str, seq); if (dir) { reverse_complement(temp_str, readlen); } int en_str_len = 0; int eq_char_num = 0; for (int rj = pos, tj = 0; tj < readlen; ++rj, ++tj) { if (ref[rj] != temp_str[tj]) { if (eq_char_num > 1) { sprintf(int_str, "%d", eq_char_num); for (char *tk = int_str; *tk != '\0'; ++tk) { en_str[en_str_len++] = *tk; } eq_char_num = 0; } else { for (int i = tj - eq_char_num; i < tj; ++i) { en_str[en_str_len++] = temp_str[i]; } } en_str[en_str_len++] = temp_str[tj]; } else ++eq_char_num; } if (en_str_len == 0) { en_str[en_str_len++] = '0'; } en_str[en_str_len] = '\0'; return en_str_len <= readlen*0.4; } static void realign_hash_search(kt_realign_hash_for_t *t, int i_, int tid_) { cluster_t *p = &reads->clusters[t->index][tid_].a[i_]; qsort(p->a, p->n, sizeof(uint64_t), cmpcluster2); std::bitset<2*readlen> ref, revref, b; int64_t *dictidx = (int64_t*)alloca(2 * sizeof(int64_t));//to store the start and end index (end not inclusive) in the dict read_id array uint32_t startposidx;//index in startpos bool flag = false; uint32_t current, k, rid; uint64_t ull, y; int pre_pn = p->n; // uint64_t y = p->a[p->n - 1]; // the last one // uint32_t rid = (uint32_t)(y >> 32); // int pos = (uint32_t)y >> 1; std::list<uint32_t> *deleted_rids = new std::list<uint32_t> [numdict_s]; // b = stringtobitset(reads->seq[rid].seq); char *s1 = (char*)alloca((readlen + 1) * sizeof(char)); int ref_len = 
strlen(p->ref) - readlen + 1; // bool debug = false; // if (strcmp(p->ref, "CCGTCACCCGGGGTCCCCAGGGTAGGCACGGCGAATACCATCGAAAGTTGATAGGGCAGCCGTTCGAATGGGTCGTCGCCGCCACGGGGGGCGTGCGATCGG") == 0) { // debug = true; // } /*if (debug) { fprintf(stderr, "p->n: %d\n", p->n); fprintf(stderr, "p->ref: %s\n", p->ref); for (int k = 0; k < p->n; ++k) { // p->a[k] uint64_t y = p->a[k]; int rid = y>>32; int pos = (uint32_t)y>>1; int dir = y&1; fprintf(stderr, "%s\n", reads->seq[rid].seq); } fprintf(stderr, "---\n"); exit(0); }*/ for (int jj = 0; jj < ref_len; ++jj) { ref = stringtobitset(p->ref + jj); reverse_complement_(p->ref + jj, s1); revref = stringtobitset(s1); flag = false; int j = 0; // equivalent to for (int j = 0; j < maxmatch; ++j) { //find forward match for (int l = 0; l < numdict_s; ++l) { if (dict_end[l] + j >= readlen) { continue; } // fprintf(stderr, "l: %d\n", l); b = ref & t->mask1[l]; ull = (b >> 2*dict_start[l]).to_ullong(); // fprintf(stderr, "%lu\n", ); startposidx = t->dict[l].bphf->lookup(ull); if (startposidx >= t->dict[l].numkeys)//not found continue; //check if any other thread is modifying same dictpos if (pthread_mutex_trylock(&t->dict_lock[startposidx & 0xFFFFFF])) { continue; } // pthread_mutex_lock(&t->dict_lock[startposidx & 0xFFFFFF]); t->dict[l].findpos(dictidx, startposidx); // fprintf(stderr, "dictidx: %u\n", dictidx); if (t->dict[l].empty_bin[startposidx]) { //bin is empty pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]); continue; } uint64_t ull1 = ((t->read[t->dict[l].read_id[dictidx[0]]] & t->mask1[l]) >> 2*dict_start[l]).to_ullong(); if (ull == ull1) { //checking if ull is actually the key for this bin // fprintf(stderr, "begin enumurate...\n"); for (int64_t i = dictidx[1] - 1 ; i >= dictidx[0] && i >= dictidx[1] - maxsearch; i--) { auto sg_id = t->dict[l].read_id[i]; rid = reads->sg.a[sg_id]; // if ((ref^(t->read[sg_id]&t->mask[j])).count() <= t->threshold && (t->threshold <= 24 || encode_byte(reads->seq[rid].seq, p->ref, jj, 0))) 
{ // if ((ref^(t->read[sg_id]&t->mask[j])).count() <= t->threshold && encode_byte(reads->seq[rid].seq, p->ref, jj, 0)) { if (basediff(ref^(t->read[sg_id]&t->mask[j])) <= t->threshold && encode_byte(reads->seq[rid].seq, p->ref, jj, 0)) { // if ((ref^(t->read[sg_id]&t->mask[j])).count() <= t->threshold) { pthread_mutex_lock(&t->read_lock[sg_id & 0xFFFFFF]); if (!reads->sg_flag[sg_id]) { reads->sg_flag[sg_id] = true; // if (reads->sg.a[sg_id] == 6366077) fprintf(stderr, "rid == 6366077 in kthread_hash_realign.c\n"); flag = true; } pthread_mutex_unlock(&t->read_lock[sg_id & 0xFFFFFF]); if (flag) { flag = false; // rid = reads->sg.a[sg_id]; // fprintf(stderr, "find!!! %s\n", reads->seq[rid].seq); // if (rid == 6366077) debug = true; y = (uint64_t)rid << 32 | ((uint64_t)(jj) << 1) | 0; kv_push(uint64_t, *p, y); for(int l1 = 0; l1 < numdict_s; l1++) { deleted_rids[l1].push_back(sg_id); } } } } } pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]); //delete from dictionaries for (int l1 = 0; l1 < numdict_s; ++l1) { for(auto it = deleted_rids[l1].begin(); it != deleted_rids[l1].end();) { b = t->read[*it] & t->mask1[l1]; ull = (b >> 2*dict_start[l1]).to_ullong(); startposidx = t->dict[l1].bphf->lookup(ull); if (pthread_mutex_trylock(&t->dict_lock[startposidx & 0xFFFFFF])) { ++it; continue; } // pthread_mutex_lock(&t->dict_lock[startposidx & 0xFFFFFF]); t->dict[l1].findpos(dictidx, startposidx); t->dict[l1].remove(dictidx, startposidx, *it); it = deleted_rids[l1].erase(it); pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]); } } } if (flag) continue; //find reverse match for (int l = 0; l < numdict_s; l++) { if (dict_start[l] <= j) continue; b = revref&t->mask1[l]; ull = (b>>2*dict_start[l]).to_ullong(); startposidx = t->dict[l].bphf->lookup(ull); if (startposidx >= t->dict[l].numkeys)//not found continue; //check if any other thread is modifying same dictpos // pthread_mutex_lock(&t->dict_lock[startposidx & 0xFFFFFF]); if 
(pthread_mutex_trylock(&t->dict_lock[startposidx & 0xFFFFFF])) { continue; } t->dict[l].findpos(dictidx,startposidx); if (t->dict[l].empty_bin[startposidx]) {//bin is empty pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]); continue; } uint64_t ull1 = ((t->read[t->dict[l].read_id[dictidx[0]]] & t->mask1[l])>>2*dict_start[l]).to_ullong(); if (ull == ull1) { //checking if ull is actually the key for this bin for (int64_t i = dictidx[1] - 1 ; i >= dictidx[0] && i >= dictidx[1] - maxsearch; i--) { auto sg_id = t->dict[l].read_id[i]; rid = reads->sg.a[sg_id]; if ((revref^(t->read[sg_id]&t->revmask[j])).count() <= t->threshold && (t->threshold <= 24 || encode_byte(reads->seq[rid].seq, p->ref, jj, 1))) { // if ((revref^(t->read[sg_id]&t->revmask[j])).count() <= t->threshold && encode_byte(reads->seq[rid].seq, p->ref, jj, 1)) { // if ((revref^(t->read[sg_id]&t->revmask[j])).count() <= t->threshold) { pthread_mutex_lock(&t->read_lock[sg_id & 0xFFFFFF]); if (!reads->sg_flag[sg_id]) { reads->sg_flag[sg_id] = true; flag = true; } pthread_mutex_unlock(&t->read_lock[sg_id & 0xFFFFFF]); if (flag) { flag = false; // if (rid == 6366077) debug = true; // rid = reads->sg.a[sg_id]; y = (uint64_t)rid << 32 | ((uint64_t)(jj) << 1) | 1; kv_push(uint64_t, *p, y); for(int l1 = 0; l1 < numdict_s; l1++) { deleted_rids[l1].push_back(sg_id); } } } } } pthread_mutex_unlock(&t->dict_lock[startposidx & 0xFFFFFF]); //delete from dictionaries for (int l1 = 0; l1 < numdict_s; ++l1) { for(auto it = deleted_rids[l1].begin(); it != deleted_rids[l1].end();) { b = t->read[*it] & t->mask1[l1]; ull = (b >> 2*dict_start[l1]).to_ullong(); startposidx = t->dict[l1].bphf->lookup(ull); if (pthread_mutex_trylock(&t->dict_lock[startposidx & 0xFFFFFF])) { ++it; continue; } // pthread_mutex_lock(&t->dict_lock[startposidx & 0xFFFFFF]); t->dict[l1].findpos(dictidx, startposidx); t->dict[l1].remove(dictidx, startposidx, *it); it = deleted_rids[l1].erase(it); pthread_mutex_unlock(&t->dict_lock[startposidx & 
0xFFFFFF]); } } } }
    delete[] deleted_rids;
    // update_reference(reads, p, p->n);
}

// pthread entry point: each worker owns one cluster list
// (reads->clusters[index][tid]) and walks its own counter w->i up to w->n,
// calling realign_hash_search() once per cluster.  Only this thread
// increments w->i, so the atomic fetch-add acts as a plain i++ here.
static void *ktf_realign_hash_worker(void *data)
{
    ktf_realign_hash_worker_t *w = (ktf_realign_hash_worker_t*)data;
    kt_realign_hash_for_t *t = w->t;
    // fprintf(stderr, "tid: %ld\n", w - w->t->w);
    long i;
    for (;;) {
        i = __sync_fetch_and_add(&w->i, 1);
        if (i >= w->n) break;
        // worker id is recovered from the worker's offset in the array
        realign_hash_search(t, i, w - w->t->w);
    }
    pthread_exit(0);
}

// Driver: builds the shared context (bit masks, per-bucket dict locks,
// per-read locks), spawns n_threads workers over the cluster lists in
// reads->clusters[index], joins them, and frees all shared resources.
// 'read' and 'dict' are owned by the caller and only borrowed here.
void kt_realign_hash_for(int n_threads, reads_t *reads, int index, int max_threshold, std::bitset<2*readlen> *read, bbhashdict *dict)
{
    int i;
    kt_realign_hash_for_t t;
    pthread_t *tid;
    t.reads = reads, t.n_threads = n_threads, t.index = index, t.threshold = max_threshold, t.dict = dict, t.read = read;
    t.w = (ktf_realign_hash_worker_t*)calloc(n_threads, sizeof(ktf_realign_hash_worker_t));
    // lock striping: hash-bucket and read indices are masked with 0xFFFFFF
    // by the workers, so num_locks mutexes cover all buckets/reads
    t.dict_lock = (pthread_mutex_t*)calloc(num_locks, sizeof(pthread_mutex_t));
    t.read_lock = (pthread_mutex_t*)calloc(num_locks, sizeof(pthread_mutex_t));
    for (int j = 0; j < num_locks; ++j) {
        pthread_mutex_init(&t.dict_lock[j], 0);
        pthread_mutex_init(&t.read_lock[j], 0);
    }
    // NOTE(review): std::bitset arrays are calloc'd (no constructor run);
    // works because an all-zero bitset is the value-initialized state —
    // but this relies on bitset being trivially representable.
    t.mask = (std::bitset<2*readlen>*)calloc(maxmatch, sizeof(std::bitset<2*readlen>));
    t.revmask = (std::bitset<2*readlen>*)calloc(maxmatch, sizeof(std::bitset<2*readlen>));
    generatemasks(t.mask, t.revmask);
    t.mask1 = (std::bitset<2*readlen>*)calloc(numdict_s, sizeof(std::bitset<2*readlen>));
    generateindexmasks(t.mask1, numdict_s);
    tid = (pthread_t*)alloca(n_threads * sizeof(pthread_t));
    // fprintf(stderr, "n_threads: %d\n", n_threads);
    for (i = 0; i < n_threads; ++i) {
        // worker i processes the i-th cluster list of this index
        t.w[i].t = &t, t.w[i].i = 0, t.w[i].n = reads->clusters[index][i].n;
    }
    // fprintf(stderr, "before pthread_create()...\n");
    for (i = 0; i < n_threads; ++i) pthread_create(&tid[i], 0, ktf_realign_hash_worker, &t.w[i]);
    for (i = 0; i < n_threads; ++i) pthread_join(tid[i], 0);
    // fprintf(stderr, "after pthread_join()...\n");
    free(t.w); free(t.dict_lock); free(t.read_lock);
    free(t.mask); free(t.mask1); free(t.revmask);
}

// Top-level entry: sets up global tables and bitset-encoded reads, builds
// the minimal-perfect-hash dictionaries, runs the threaded realignment over
// clusters[index], then tears everything down.
void realign_hash(int n_threads, reads_t *reads, int index, int max_threshold)
{
    // index selects which cluster set (reads->clusters[index]) to realign
    numreads = reads->sg.n;
    // fprintf(stderr, "numreads: %d\n", numreads);
    // std::cerr << outdir+uuid+std::string("keys.bin.") << "\n";
    omp_set_num_threads(n_threads);
    setglobalarrays_realign();
    generateAllATbitset();
    std::bitset<2*readlen> *read = new std::bitset<2*readlen> [numreads];
    singleRead2bitset(reads, read, max_threshold);
    // NOTE(review): variable-length array of C++ objects — a GCC/Clang
    // extension, not standard C++
    bbhashdict dict[numdict_s];
    // fprintf(stderr, "Constructing dictionaries\n");
    constructdictionary_realign(read, dict);
    // fprintf(stderr, "begin realign reads\n");
    // n_threads = 1;
    kt_realign_hash_for(n_threads, reads, index, max_threshold, read, dict);
    freeglobalarrays();
    delete[] read;
    // fprintf(stderr, "end realign_hash(reads_t *reads, int index)\n************-------\n");
}
LISAgeometry.c
/** * \author Sylvain Marsat, University of Maryland - NASA GSFC * * \brief C code for the geometric coefficients entering the response for LISA-like detectors. * */ #define _XOPEN_SOURCE 500 #ifdef __GNUC__ #define UNUSED __attribute__ ((unused)) #else #define UNUSED #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <complex.h> #include <time.h> #include <unistd.h> #include <getopt.h> #include <stdbool.h> #include <string.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_bspline.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_min.h> #include <gsl/gsl_spline.h> #include <gsl/gsl_complex.h> #include "constants.h" #include "waveform.h" #include "LISAgeometry.h" #include <time.h> /* for testing */ //Named LISA-like constellation struct examples /* struct tagLISAconstellation { double OrbitOmega,OrbitPhi0,OrbitR; double ConstOmega,ConstPhi0,ConstL; } */ LISAconstellation LISAProposal = { EarthOrbitOmega_SI, 0, AU_SI, EarthOrbitOmega_SI, 0, 2.5e9, LISAProposalnoise }; LISAconstellation LISA2017 = { EarthOrbitOmega_SI, 0, AU_SI, EarthOrbitOmega_SI, 0, 2.5e9, LISA2017noise }; LISAconstellation LISA2010 = { EarthOrbitOmega_SI, 0, AU_SI, EarthOrbitOmega_SI, 0, 5e9, LISA2010noise }; LISAconstellation slowOrbitLISA = { EarthOrbitOmega_SI/100.0, 0, AU_SI, EarthOrbitOmega_SI/100.0, 0, 2.5e9, LISA2017noise }; LISAconstellation tinyOrbitLISA = { EarthOrbitOmega_SI, 0, AU_SI/100, EarthOrbitOmega_SI, 0, 2.5e9, LISA2017noise }; LISAconstellation fastOrbitLISA = { EarthOrbitOmega_SI*10.0, 0, AU_SI, EarthOrbitOmega_SI*10.0, 0, 2.5e9, LISA2017noise }; LISAconstellation bigOrbitLISA = { EarthOrbitOmega_SI/10.0, 0, AU_SI, EarthOrbitOmega_SI/10.0, 0, 2.5e9, LISA2017noise }; /****************************************************************/ /********* Coefficients for the geometric response **************/ /* External storage for cos, sin and coefficients */ static double coeffn1Hn1crossconst, coeffn1Hn1plusconst, coeffn2Hn2crossconst, coeffn2Hn2plusconst, 
coeffn3Hn3crossconst, coeffn3Hn3plusconst;
/* Fourier coefficients (4 harmonics of the orbital phase) of the h+ / hx
 * projections n_i.H.n_i for the three constellation arms i = 1,2,3. */
static double coeffn1Hn1pluscos[4];
static double coeffn1Hn1plussin[4];
static double coeffn2Hn2pluscos[4];
static double coeffn2Hn2plussin[4];
static double coeffn3Hn3pluscos[4];
static double coeffn3Hn3plussin[4];
static double coeffn1Hn1crosscos[4];
static double coeffn1Hn1crosssin[4];
static double coeffn2Hn2crosscos[4];
static double coeffn2Hn2crosssin[4];
static double coeffn3Hn3crosscos[4];
static double coeffn3Hn3crosssin[4];
/* Constant parts of the k.n_i, k.(p_i+p_j), k.p_i and k.R scalar products
 * (k: wave propagation direction; n_i: arm unit vectors; p_i: spacecraft
 * positions; R: constellation center). */
static double coeffkn1const, coeffkn2const, coeffkn3const, coeffkp1plusp2const, coeffkp2plusp3const, coeffkp3plusp1const, coeffkp1const, coeffkp2const, coeffkp3const, coeffkRconst;
/* Fourier coefficients (2 harmonics) of the same scalar products. */
static double coeffkn1cos[2];
static double coeffkn1sin[2];
static double coeffkn2cos[2];
static double coeffkn2sin[2];
static double coeffkn3cos[2];
static double coeffkn3sin[2];
static double coeffkp1plusp2cos[2];
static double coeffkp1plusp2sin[2];
static double coeffkp2plusp3cos[2];
static double coeffkp2plusp3sin[2];
static double coeffkp3plusp1cos[2];
static double coeffkp3plusp1sin[2];
static double coeffkp1cos[2];
static double coeffkp1sin[2];
static double coeffkp2cos[2];
static double coeffkp2sin[2];
static double coeffkp3cos[2];
static double coeffkp3sin[2];
static double coeffkRcos[2];
static double coeffkRsin[2];
/* Scratch storage for cos(n*phase), sin(n*phase), n = 1..4. */
static double cosarray[4];
static double sinarray[4];
/* All of the above is per-source-position state written by SetCoeffsG();
 * each OpenMP thread gets its own private copy. */
#pragma omp threadprivate(coeffn1Hn1crossconst, coeffn1Hn1plusconst, coeffn2Hn2crossconst, coeffn2Hn2plusconst, coeffn3Hn3crossconst, coeffn3Hn3plusconst)
#pragma omp threadprivate(coeffn1Hn1pluscos,coeffn1Hn1plussin,coeffn2Hn2pluscos,coeffn2Hn2plussin,coeffn3Hn3pluscos,coeffn3Hn3plussin)
#pragma omp threadprivate(coeffn1Hn1crosscos,coeffn1Hn1crosssin,coeffn2Hn2crosscos,coeffn2Hn2crosssin,coeffn3Hn3crosscos,coeffn3Hn3crosssin)
#pragma omp threadprivate(coeffkn1const, coeffkn2const, coeffkn3const, coeffkp1plusp2const, coeffkp2plusp3const, coeffkp3plusp1const, coeffkp1const, coeffkp2const, coeffkp3const, coeffkRconst)
#pragma omp threadprivate(coeffkn1cos,coeffkn1sin,coeffkn2cos,coeffkn2sin,coeffkn3cos,coeffkn3sin)
#pragma omp threadprivate(coeffkp1cos,coeffkp1sin,coeffkp2cos,coeffkp2sin,coeffkp3cos,coeffkp3sin)
#pragma omp threadprivate(coeffkp1plusp2cos,coeffkp1plusp2sin,coeffkp2plusp3cos,coeffkp2plusp3sin,coeffkp3plusp1cos,coeffkp3plusp1sin)
#pragma omp threadprivate(coeffkRcos,coeffkRsin,cosarray,sinarray)

/*************************************************************/
/********* Functions for the geometric response **************/

/* Function to convert string input TDI string to TDItag */
/* Exits the process with status 1 on an unrecognized string.
 * NOTE(review): error message goes to stdout; stderr would be conventional. */
TDItag ParseTDItag(char* string) {
  TDItag tag;
  if(strcmp(string, "delayO")==0) tag = delayO;
  else if(strcmp(string, "y12L")==0) tag = y12L;
  else if(strcmp(string, "y12")==0) tag = y12;
  else if(strcmp(string, "TDIXYZ")==0) tag = TDIXYZ;
  else if(strcmp(string, "TDIalphabetagamma")==0) tag = TDIalphabetagamma;
  else if(strcmp(string, "TDIAETXYZ")==0) tag = TDIAETXYZ;
  else if(strcmp(string, "TDIAETalphabetagamma")==0) tag = TDIAETalphabetagamma;
  else if(strcmp(string, "TDIX")==0) tag = TDIX;
  else if(strcmp(string, "TDIalpha")==0) tag = TDIalpha;
  else if(strcmp(string, "TDIAXYZ")==0) tag = TDIAXYZ;
  else if(strcmp(string, "TDIEXYZ")==0) tag = TDIEXYZ;
  else if(strcmp(string, "TDITXYZ")==0) tag = TDITXYZ;
  else if(strcmp(string, "TDIAalphabetagamma")==0) tag = TDIAalphabetagamma;
  else if(strcmp(string, "TDIEalphabetagamma")==0) tag = TDIEalphabetagamma;
  else if(strcmp(string, "TDITalphabetagamma")==0) tag = TDITalphabetagamma;
  else {
    printf("Error in ParseTDItag: string not recognized.\n");
    exit(1);
  }
  return tag;
}

/* Function to convert string input ResponseApprox to tag */
/* Exits the process with status 1 on an unrecognized string. */
ResponseApproxtag ParseResponseApproxtag(char* string) {
  ResponseApproxtag tag;
  if(strcmp(string, "full")==0) tag = full;
  else if(strcmp(string, "lowfL")==0) tag = lowfL;
  else if(strcmp(string, "lowf")==0) tag = lowf;
  else {
    printf("Error in ParseResponseApproxtag: string not recognized.\n");
    exit(1);
  }
  return tag;
}

/* Compute Solar
System Barycenter time tSSB from retarded time at the center of the LISA constellation tL */ double tSSBfromtL(const LISAconstellation *variant, const double tL, const double lambda, const double beta) { double phase=variant->ConstOmega*tL + variant->ConstPhi0 - lambda; double RoC=variant->OrbitR/C_SI; return tL + RoC*cos(beta)*cos(phase) - 1./2*variant->ConstOmega*pow(RoC*cos(beta), 2)*sin(2.*phase); } double tLfromtSSB(const LISAconstellation *variant, const double tSSB, const double lambda, const double beta) { double phase=variant->ConstOmega*tSSB + variant->ConstPhi0 - lambda; double RoC=variant->OrbitR/C_SI; return tSSB - RoC*cos(beta)*cos(phase); } /* Function cardinal sine */ double sinc(const double x) { if (x==0) return 1; else return sin(x)/x; } /* Function to compute, given a value of a sky position and polarization, all the complicated time-independent trigonometric coefficients entering the response */ void SetCoeffsG(const double lambda, const double beta, const double psi) { /* Precomputing cosines and sines */ double coslambda = cos(lambda); double sinlambda = sin(lambda); double cosbeta = cos(beta); double sinbeta = sin(beta); double cospsi = cos(psi); double sinpsi = sin(psi); /* Projection coefficients for hplus in n3.H.n3 */ /**/ coeffn3Hn3plusconst = 1./128 * (-4*cospsi*cospsi + 4*sinpsi*sinpsi -27*coslambda*coslambda*cospsi*cospsi -27*sinlambda*sinlambda*sinpsi*sinpsi -4*cosbeta*cosbeta*cospsi*cospsi -4*sinbeta*sinbeta*sinpsi*sinpsi + 4*cosbeta*cosbeta*sinpsi*sinpsi + 4*cospsi*cospsi*sinbeta*sinbeta + 27*coslambda*coslambda*sinpsi*sinpsi + 27*cospsi*cospsi*sinlambda*sinlambda -9*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -9*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -9*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -9*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 9*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + 9*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + 9*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi 
+ 9*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -54*sqrt3*coslambda*sinlambda*sinpsi*sinpsi + 54*sqrt3*coslambda*cospsi*cospsi*sinlambda -144*coslambda*cospsi*sinbeta*sinlambda*sinpsi -72*sqrt3*coslambda*coslambda*cospsi*sinbeta*sinpsi -18*sqrt3*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi -18*sqrt3*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 18*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda + 18*sqrt3*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 72*sqrt3*cospsi*sinbeta*sinlambda*sinlambda*sinpsi); /**/ coeffn3Hn3pluscos[0] = 1./16*cosbeta * (-9*cospsi*cospsi*sinbeta*sinlambda + 9*sinbeta*sinlambda*sinpsi*sinpsi + 18*coslambda*cospsi*sinpsi -7*sqrt3*coslambda*sinbeta*sinpsi*sinpsi + 7*sqrt3*coslambda*cospsi*cospsi*sinbeta + 14*sqrt3*cospsi*sinlambda*sinpsi); /**/ coeffn3Hn3pluscos[1] = -3./64 * (-3*sinpsi*sinpsi + 3*cospsi*cospsi -6*coslambda*coslambda*cospsi*cospsi -6*sinlambda*sinlambda*sinpsi*sinpsi -3*cosbeta*cosbeta*sinpsi*sinpsi -3*cospsi*cospsi*sinbeta*sinbeta + 3*cosbeta*cosbeta*cospsi*cospsi + 3*sinbeta*sinbeta*sinpsi*sinpsi + 6*coslambda*coslambda*sinpsi*sinpsi + 6*cospsi*cospsi*sinlambda*sinlambda -2*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -2*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -2*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -2*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + 2*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + 2*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -32*coslambda*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn3Hn3pluscos[2] = -1./16*cosbeta * (-6*coslambda*cospsi*sinpsi -3*sinbeta*sinlambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinbeta*sinlambda + sqrt3*coslambda*cospsi*cospsi*sinbeta -sqrt3*coslambda*sinbeta*sinpsi*sinpsi + 2*sqrt3*cospsi*sinlambda*sinpsi); /**/ coeffn3Hn3pluscos[3] = 1./128 * (-3*coslambda*coslambda*cospsi*cospsi 
-3*sinlambda*sinlambda*sinpsi*sinpsi + 3*coslambda*coslambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinlambda*sinlambda + cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -6*sqrt3*coslambda*cospsi*cospsi*sinlambda + 6*sqrt3*coslambda*sinlambda*sinpsi*sinpsi -16*coslambda*cospsi*sinbeta*sinlambda*sinpsi -8*sqrt3*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -2*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -2*sqrt3*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 2*sqrt3*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 2*sqrt3*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 8*sqrt3*coslambda*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn3Hn3plussin[0] = -1./16*cosbeta * (-9*coslambda*sinbeta*sinpsi*sinpsi + 9*coslambda*cospsi*cospsi*sinbeta + 18*cospsi*sinlambda*sinpsi + sqrt3*sinbeta*sinlambda*sinpsi*sinpsi -sqrt3*cospsi*cospsi*sinbeta*sinlambda + 2*sqrt3*coslambda*cospsi*sinpsi); /**/ coeffn3Hn3plussin[1] = 3./64 * (-3*sqrt3*sinpsi*sinpsi + 3*sqrt3*cospsi*cospsi -12*coslambda*sinlambda*sinpsi*sinpsi -3*sqrt3*cosbeta*cosbeta*sinpsi*sinpsi -3*sqrt3*cospsi*cospsi*sinbeta*sinbeta + 3*sqrt3*cosbeta*cosbeta*cospsi*cospsi + 3*sqrt3*sinbeta*sinbeta*sinpsi*sinpsi + 12*coslambda*cospsi*cospsi*sinlambda -16*coslambda*coslambda*cospsi*sinbeta*sinpsi -4*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi -4*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 4*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda + 4*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 16*cospsi*sinbeta*sinlambda*sinlambda*sinpsi); /**/ coeffn3Hn3plussin[2] = 1./16*cosbeta * (-3*coslambda*sinbeta*sinpsi*sinpsi + 
3*coslambda*cospsi*cospsi*sinbeta + 6*cospsi*sinlambda*sinpsi + sqrt3*sinbeta*sinlambda*sinpsi*sinpsi -sqrt3*cospsi*cospsi*sinbeta*sinlambda + 2*sqrt3*coslambda*cospsi*sinpsi); /**/ coeffn3Hn3plussin[3] = 1./128 * (-6*coslambda*cospsi*cospsi*sinlambda -3*sqrt3*coslambda*coslambda*sinpsi*sinpsi -3*sqrt3*cospsi*cospsi*sinlambda*sinlambda + 3*sqrt3*coslambda*coslambda*cospsi*cospsi + 3*sqrt3*sinlambda*sinlambda*sinpsi*sinpsi + 6*coslambda*sinlambda*sinpsi*sinpsi + sqrt3*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi + sqrt3*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda + sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta + sqrt3*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -8*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -2*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -2*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi -sqrt3*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi -sqrt3*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi -sqrt3*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi -sqrt3*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda + 2*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 2*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 8*coslambda*coslambda*cospsi*sinbeta*sinpsi + 16*sqrt3*coslambda*cospsi*sinbeta*sinlambda*sinpsi); /* Projection coefficients for hcross in n3.H.n3 */ /**/ coeffn3Hn3crossconst = 1./64 * (4*cospsi*sinpsi -27*cospsi*sinlambda*sinlambda*sinpsi -4*cospsi*sinbeta*sinbeta*sinpsi + 4*cosbeta*cosbeta*cospsi*sinpsi + 27*coslambda*coslambda*cospsi*sinpsi -36*coslambda*cospsi*cospsi*sinbeta*sinlambda -18*sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta -18*sqrt3*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -9*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -9*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 9*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 9*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 18*sqrt3*coslambda*coslambda*sinbeta*sinpsi*sinpsi + 
18*sqrt3*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 36*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -54*sqrt3*coslambda*cospsi*sinlambda*sinpsi -18*sqrt3*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi + 18*sqrt3*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi); /**/ coeffn3Hn3crosscos[0] = 1./16*cosbeta * (-9*coslambda*sinpsi*sinpsi + 9*coslambda*cospsi*cospsi -7*sqrt3*sinlambda*sinpsi*sinpsi + 7*sqrt3*cospsi*cospsi*sinlambda + 18*cospsi*sinbeta*sinlambda*sinpsi -14*sqrt3*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn3Hn3crosscos[1] = -3./32 * (-3*cospsi*sinpsi -6*cospsi*sinlambda*sinlambda*sinpsi -3*cosbeta*cosbeta*cospsi*sinpsi + 3*cospsi*sinbeta*sinbeta*sinpsi + 6*coslambda*coslambda*cospsi*sinpsi -8*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -2*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 2*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 8*coslambda*sinbeta*sinlambda*sinpsi*sinpsi); /**/ coeffn3Hn3crosscos[2] = 1./16*cosbeta * (-3*coslambda*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi + sqrt3*sinlambda*sinpsi*sinpsi -sqrt3*cospsi*cospsi*sinlambda + 6*cospsi*sinbeta*sinlambda*sinpsi + 2*sqrt3*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn3Hn3crosscos[3] = 1./64 * (-3*cospsi*sinlambda*sinlambda*sinpsi + 3*coslambda*coslambda*cospsi*sinpsi + cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi -4*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*sqrt3*coslambda*coslambda*sinbeta*sinpsi*sinpsi -2*sqrt3*cospsi*cospsi*sinbeta*sinlambda*sinlambda -cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta + 2*sqrt3*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 4*coslambda*sinbeta*sinlambda*sinpsi*sinpsi + 6*sqrt3*coslambda*cospsi*sinlambda*sinpsi -2*sqrt3*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi + 
2*sqrt3*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi); /**/ coeffn3Hn3crosssin[0] = -1./16*cosbeta * (-9*sinlambda*sinpsi*sinpsi + 9*cospsi*cospsi*sinlambda + sqrt3*coslambda*cospsi*cospsi -sqrt3*coslambda*sinpsi*sinpsi -18*coslambda*cospsi*sinbeta*sinpsi + 2*sqrt3*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn3Hn3crosssin[1] = -3./32 * (-4*coslambda*coslambda*sinbeta*sinpsi*sinpsi -4*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 3*sqrt3*cospsi*sinpsi + 4*coslambda*coslambda*cospsi*cospsi*sinbeta + 4*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*sqrt3*cospsi*sinbeta*sinbeta*sinpsi + 3*sqrt3*cosbeta*cosbeta*cospsi*sinpsi + 12*coslambda*cospsi*sinlambda*sinpsi -4*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi + 4*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi); /**/ coeffn3Hn3crosssin[2] = 1./16*cosbeta * (-3*sinlambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinlambda + sqrt3*coslambda*cospsi*cospsi -sqrt3*coslambda*sinpsi*sinpsi -6*coslambda*cospsi*sinbeta*sinpsi + 2*sqrt3*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn3Hn3crosssin[3] = 1./64 * (-2*coslambda*coslambda*sinbeta*sinpsi*sinpsi -2*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 2*coslambda*coslambda*cospsi*cospsi*sinbeta + 2*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*sqrt3*coslambda*coslambda*cospsi*sinpsi + 3*sqrt3*cospsi*sinlambda*sinlambda*sinpsi + 6*coslambda*cospsi*sinlambda*sinpsi + sqrt3*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi + sqrt3*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi -4*sqrt3*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -2*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi -sqrt3*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi -sqrt3*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 2*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi + 4*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinlambda); /* Projection coefficients for hplus in n2.H.n2 */ /**/ coeffn2Hn2plusconst = 1./128 * (-4*cospsi*cospsi + 4*sinpsi*sinpsi -27*coslambda*coslambda*cospsi*cospsi 
-27*sinlambda*sinlambda*sinpsi*sinpsi -4*cosbeta*cosbeta*cospsi*cospsi -4*sinbeta*sinbeta*sinpsi*sinpsi + 4*cosbeta*cosbeta*sinpsi*sinpsi + 4*cospsi*cospsi*sinbeta*sinbeta + 27*coslambda*coslambda*sinpsi*sinpsi + 27*cospsi*cospsi*sinlambda*sinlambda -9*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -9*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -9*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -9*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 9*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + 9*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + 9*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + 9*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -54*sqrt3*coslambda*cospsi*cospsi*sinlambda + 54*sqrt3*coslambda*sinlambda*sinpsi*sinpsi -144*coslambda*cospsi*sinbeta*sinlambda*sinpsi -72*sqrt3*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -18*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -18*sqrt3*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 18*sqrt3*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 18*sqrt3*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 72*sqrt3*coslambda*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn2Hn2pluscos[0] = 1./16*cosbeta * (-18*coslambda*cospsi*sinpsi -9*sinbeta*sinlambda*sinpsi*sinpsi + 9*cospsi*cospsi*sinbeta*sinlambda -7*sqrt3*coslambda*sinbeta*sinpsi*sinpsi + 7*sqrt3*coslambda*cospsi*cospsi*sinbeta + 14*sqrt3*cospsi*sinlambda*sinpsi); /**/ coeffn2Hn2pluscos[1] = -3./64 * (-3*sinpsi*sinpsi + 3*cospsi*cospsi -6*coslambda*coslambda*cospsi*cospsi -6*sinlambda*sinlambda*sinpsi*sinpsi -3*cosbeta*cosbeta*sinpsi*sinpsi -3*cospsi*cospsi*sinbeta*sinbeta + 3*cosbeta*cosbeta*cospsi*cospsi + 3*sinbeta*sinbeta*sinpsi*sinpsi + 6*coslambda*coslambda*sinpsi*sinpsi + 6*cospsi*cospsi*sinlambda*sinlambda -2*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -2*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -2*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -2*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 
2*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + 2*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + 2*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -32*coslambda*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn2Hn2pluscos[2] = -1./16*cosbeta * (-3*cospsi*cospsi*sinbeta*sinlambda + 3*sinbeta*sinlambda*sinpsi*sinpsi + 6*coslambda*cospsi*sinpsi + sqrt3*coslambda*cospsi*cospsi*sinbeta -sqrt3*coslambda*sinbeta*sinpsi*sinpsi + 2*sqrt3*cospsi*sinlambda*sinpsi); /**/ coeffn2Hn2pluscos[3] = 1./128 * (-3*coslambda*coslambda*cospsi*cospsi -3*sinlambda*sinlambda*sinpsi*sinpsi + 3*coslambda*coslambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinlambda*sinlambda + cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -6*sqrt3*coslambda*sinlambda*sinpsi*sinpsi + 6*sqrt3*coslambda*cospsi*cospsi*sinlambda -16*coslambda*cospsi*sinbeta*sinlambda*sinpsi -8*sqrt3*coslambda*coslambda*cospsi*sinbeta*sinpsi -2*sqrt3*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi -2*sqrt3*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 2*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda + 2*sqrt3*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 8*sqrt3*cospsi*sinbeta*sinlambda*sinlambda*sinpsi); /**/ coeffn2Hn2plussin[0] = 1./16*cosbeta * (-9*coslambda*sinbeta*sinpsi*sinpsi + 9*coslambda*cospsi*cospsi*sinbeta + 18*cospsi*sinlambda*sinpsi + sqrt3*cospsi*cospsi*sinbeta*sinlambda -2*sqrt3*coslambda*cospsi*sinpsi -sqrt3*sinbeta*sinlambda*sinpsi*sinpsi); /**/ coeffn2Hn2plussin[1] = -3./64 * (-3*sqrt3*sinpsi*sinpsi + 3*sqrt3*cospsi*cospsi -12*coslambda*cospsi*cospsi*sinlambda 
-3*sqrt3*cosbeta*cosbeta*sinpsi*sinpsi -3*sqrt3*cospsi*cospsi*sinbeta*sinbeta + 3*sqrt3*cosbeta*cosbeta*cospsi*cospsi + 3*sqrt3*sinbeta*sinbeta*sinpsi*sinpsi + 12*coslambda*sinlambda*sinpsi*sinpsi -16*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -4*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -4*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 4*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 4*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 16*coslambda*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn2Hn2plussin[2] = -1./16*cosbeta * (-3*coslambda*sinbeta*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi*sinbeta + 6*cospsi*sinlambda*sinpsi + sqrt3*cospsi*cospsi*sinbeta*sinlambda -2*sqrt3*coslambda*cospsi*sinpsi -sqrt3*sinbeta*sinlambda*sinpsi*sinpsi); /**/ coeffn2Hn2plussin[3] = 1./128 * (-6*coslambda*cospsi*cospsi*sinlambda -3*sqrt3*coslambda*coslambda*cospsi*cospsi -3*sqrt3*sinlambda*sinlambda*sinpsi*sinpsi + 3*sqrt3*coslambda*coslambda*sinpsi*sinpsi + 3*sqrt3*cospsi*cospsi*sinlambda*sinlambda + 6*coslambda*sinlambda*sinpsi*sinpsi + sqrt3*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + sqrt3*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + sqrt3*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + sqrt3*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda -8*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -2*coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -2*cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi -sqrt3*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -sqrt3*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -sqrt3*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 2*coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + 2*cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 8*coslambda*coslambda*cospsi*sinbeta*sinpsi -16*sqrt3*coslambda*cospsi*sinbeta*sinlambda*sinpsi); /* Projection coefficients for hcross in n2.H.n2 */ /**/ coeffn2Hn2crossconst = 1./64 * (4*cospsi*sinpsi 
-27*cospsi*sinlambda*sinlambda*sinpsi -4*cospsi*sinbeta*sinbeta*sinpsi + 4*cosbeta*cosbeta*cospsi*sinpsi + 27*coslambda*coslambda*cospsi*sinpsi -36*coslambda*cospsi*cospsi*sinbeta*sinlambda -18*sqrt3*coslambda*coslambda*sinbeta*sinpsi*sinpsi -18*sqrt3*cospsi*cospsi*sinbeta*sinlambda*sinlambda -9*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -9*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 9*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 9*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 18*sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta + 18*sqrt3*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 36*coslambda*sinbeta*sinlambda*sinpsi*sinpsi + 54*sqrt3*coslambda*cospsi*sinlambda*sinpsi -18*sqrt3*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi + 18*sqrt3*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi); /**/ coeffn2Hn2crosscos[0] = -1./16*cosbeta * (-9*coslambda*sinpsi*sinpsi + 9*coslambda*cospsi*cospsi -7*sqrt3*cospsi*cospsi*sinlambda + 7*sqrt3*sinlambda*sinpsi*sinpsi + 18*cospsi*sinbeta*sinlambda*sinpsi + 14*sqrt3*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn2Hn2crosscos[1] = -3./32 * (-3*cospsi*sinpsi -6*cospsi*sinlambda*sinlambda*sinpsi -3*cosbeta*cosbeta*cospsi*sinpsi + 3*cospsi*sinbeta*sinbeta*sinpsi + 6*coslambda*coslambda*cospsi*sinpsi -8*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -2*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 2*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 8*coslambda*sinbeta*sinlambda*sinpsi*sinpsi); /**/ coeffn2Hn2crosscos[2] = -1./16*cosbeta * (-3*coslambda*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi + sqrt3*cospsi*cospsi*sinlambda -sqrt3*sinlambda*sinpsi*sinpsi + 6*cospsi*sinbeta*sinlambda*sinpsi -2*sqrt3*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn2Hn2crosscos[3] = 1./64 * (-3*cospsi*sinlambda*sinlambda*sinpsi + 3*coslambda*coslambda*cospsi*sinpsi + cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 
coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi -4*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*sqrt3*coslambda*coslambda*cospsi*cospsi*sinbeta -2*sqrt3*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*sqrt3*coslambda*coslambda*sinbeta*sinpsi*sinpsi + 2*sqrt3*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 4*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -6*sqrt3*coslambda*cospsi*sinlambda*sinpsi -2*sqrt3*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi + 2*sqrt3*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi); /**/ coeffn2Hn2crosssin[0] = -1./16*cosbeta * (-9*cospsi*cospsi*sinlambda + 9*sinlambda*sinpsi*sinpsi + sqrt3*coslambda*cospsi*cospsi -sqrt3*coslambda*sinpsi*sinpsi + 18*coslambda*cospsi*sinbeta*sinpsi + 2*sqrt3*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn2Hn2crosssin[1] = -3./32 * (-4*coslambda*coslambda*sinbeta*sinpsi*sinpsi -4*cospsi*cospsi*sinbeta*sinlambda*sinlambda -3*sqrt3*cospsi*sinpsi + 4*coslambda*coslambda*cospsi*cospsi*sinbeta + 4*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*sqrt3*cosbeta*cosbeta*cospsi*sinpsi + 3*sqrt3*cospsi*sinbeta*sinbeta*sinpsi + 12*coslambda*cospsi*sinlambda*sinpsi -4*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi + 4*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi); /**/ coeffn2Hn2crosssin[2] = 1./16*cosbeta * (-3*cospsi*cospsi*sinlambda + 3*sinlambda*sinpsi*sinpsi + sqrt3*coslambda*cospsi*cospsi -sqrt3*coslambda*sinpsi*sinpsi + 6*coslambda*cospsi*sinbeta*sinpsi + 2*sqrt3*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn2Hn2crosssin[3] = 1./64 * (-2*coslambda*coslambda*sinbeta*sinpsi*sinpsi -2*cospsi*cospsi*sinbeta*sinlambda*sinlambda + 2*coslambda*coslambda*cospsi*cospsi*sinbeta + 2*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*sqrt3*cospsi*sinlambda*sinlambda*sinpsi + 3*sqrt3*coslambda*coslambda*cospsi*sinpsi + 6*coslambda*cospsi*sinlambda*sinpsi + sqrt3*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + 
sqrt3*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi -4*sqrt3*coslambda*cospsi*cospsi*sinbeta*sinlambda -2*cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi -sqrt3*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -sqrt3*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 2*coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi + 4*sqrt3*coslambda*sinbeta*sinlambda*sinpsi*sinpsi); /* Projection coefficients for hplus in n1.H.n1 */ /**/ coeffn1Hn1plusconst = 1./64 * (-2*cospsi*cospsi + 2*sinpsi*sinpsi -27*coslambda*coslambda*sinpsi*sinpsi -27*cospsi*cospsi*sinlambda*sinlambda -2*cosbeta*cosbeta*cospsi*cospsi -2*sinbeta*sinbeta*sinpsi*sinpsi + 2*cosbeta*cosbeta*sinpsi*sinpsi + 2*cospsi*cospsi*sinbeta*sinbeta + 27*coslambda*coslambda*cospsi*cospsi + 27*sinlambda*sinlambda*sinpsi*sinpsi -9*cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi -9*cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi -9*coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi -9*cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda + 9*cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi + 9*cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda + 9*coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta + 9*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi + 144*coslambda*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn1Hn1pluscos[0] = -1./8*sqrt3*cosbeta * (coslambda*cospsi*cospsi*sinbeta -coslambda*sinbeta*sinpsi*sinpsi + 2*cospsi*sinlambda*sinpsi); /**/ coeffn1Hn1pluscos[1] = -3./32 * (-3*cospsi*cospsi + 3*sinpsi*sinpsi -3*cosbeta*cosbeta*cospsi*cospsi -3*coslambda*coslambda*cospsi*cospsi -3*sinbeta*sinbeta*sinpsi*sinpsi -3*sinlambda*sinlambda*sinpsi*sinpsi + 3*cosbeta*cosbeta*sinpsi*sinpsi + 3*coslambda*coslambda*sinpsi*sinpsi + 3*cospsi*cospsi*sinbeta*sinbeta + 3*cospsi*cospsi*sinlambda*sinlambda + cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi + cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi + coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi + cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda 
-cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi -cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda -coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta -sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -16*coslambda*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn1Hn1pluscos[2] = 1./8*sqrt3*cosbeta * (coslambda*cospsi*cospsi*sinbeta -coslambda*sinbeta*sinpsi*sinpsi + 2*cospsi*sinlambda*sinpsi); /**/ coeffn1Hn1pluscos[3] = 1./64 * (-3*coslambda*coslambda*sinpsi*sinpsi -3*cospsi*cospsi*sinlambda*sinlambda + 3*coslambda*coslambda*cospsi*cospsi + 3*sinlambda*sinlambda*sinpsi*sinpsi + cosbeta*cosbeta*coslambda*coslambda*sinpsi*sinpsi + cosbeta*cosbeta*cospsi*cospsi*sinlambda*sinlambda + coslambda*coslambda*cospsi*cospsi*sinbeta*sinbeta + sinbeta*sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -cosbeta*cosbeta*coslambda*coslambda*cospsi*cospsi -cosbeta*cosbeta*sinlambda*sinlambda*sinpsi*sinpsi -coslambda*coslambda*sinbeta*sinbeta*sinpsi*sinpsi -cospsi*cospsi*sinbeta*sinbeta*sinlambda*sinlambda + 16*coslambda*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn1Hn1plussin[0] = 5./8*sqrt3*cosbeta * (cospsi*cospsi*sinbeta*sinlambda -2*coslambda*cospsi*sinpsi -sinbeta*sinlambda*sinpsi*sinpsi); /**/ coeffn1Hn1plussin[1] = -3./16 * (-3*coslambda*cospsi*cospsi*sinlambda + 3*coslambda*sinlambda*sinpsi*sinpsi + coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi + cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda -4*cospsi*sinbeta*sinlambda*sinlambda*sinpsi -coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda -cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi + 4*coslambda*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn1Hn1plussin[2] = 1./8*sqrt3*cosbeta * (cospsi*cospsi*sinbeta*sinlambda -2*coslambda*cospsi*sinpsi -sinbeta*sinlambda*sinpsi*sinpsi); /**/ coeffn1Hn1plussin[3] = 1./32 * (-3*coslambda*sinlambda*sinpsi*sinpsi + 3*coslambda*cospsi*cospsi*sinlambda + coslambda*cospsi*cospsi*sinbeta*sinbeta*sinlambda + cosbeta*cosbeta*coslambda*sinlambda*sinpsi*sinpsi 
-4*coslambda*coslambda*cospsi*sinbeta*sinpsi -coslambda*sinbeta*sinbeta*sinlambda*sinpsi*sinpsi -cosbeta*cosbeta*coslambda*cospsi*cospsi*sinlambda + 4*cospsi*sinbeta*sinlambda*sinlambda*sinpsi); /* Projection coefficients for hcross in n1.H.n1 */ /**/ coeffn1Hn1crossconst = 1./32 * (2*cospsi*sinpsi -27*coslambda*coslambda*cospsi*sinpsi -2*cospsi*sinbeta*sinbeta*sinpsi + 2*cosbeta*cosbeta*cospsi*sinpsi + 27*cospsi*sinlambda*sinlambda*sinpsi -36*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -9*cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi -9*coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 9*cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi + 9*cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 36*coslambda*cospsi*cospsi*sinbeta*sinlambda); /**/ coeffn1Hn1crosscos[0] = -1./8*sqrt3*cosbeta * (cospsi*cospsi*sinlambda -sinlambda*sinpsi*sinpsi -2*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn1Hn1crosscos[1] = -3./16 * (3*cospsi*sinpsi -3*cospsi*sinbeta*sinbeta*sinpsi -3*cospsi*sinlambda*sinlambda*sinpsi + 3*cosbeta*cosbeta*cospsi*sinpsi + 3*coslambda*coslambda*cospsi*sinpsi + cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi + coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi -4*coslambda*cospsi*cospsi*sinbeta*sinlambda -cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi -cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi + 4*coslambda*sinbeta*sinlambda*sinpsi*sinpsi); /**/ coeffn1Hn1crosscos[2] = 1./8*sqrt3*cosbeta * (cospsi*cospsi*sinlambda -sinlambda*sinpsi*sinpsi -2*coslambda*cospsi*sinbeta*sinpsi); /**/ coeffn1Hn1crosscos[3] = 1./32 * (-3*coslambda*coslambda*cospsi*sinpsi + 3*cospsi*sinlambda*sinlambda*sinpsi + cospsi*sinbeta*sinbeta*sinlambda*sinlambda*sinpsi + cosbeta*cosbeta*coslambda*coslambda*cospsi*sinpsi -4*coslambda*sinbeta*sinlambda*sinpsi*sinpsi -cosbeta*cosbeta*cospsi*sinlambda*sinlambda*sinpsi -coslambda*coslambda*cospsi*sinbeta*sinbeta*sinpsi + 4*coslambda*cospsi*cospsi*sinbeta*sinlambda); /**/ coeffn1Hn1crosssin[0] = -5./8*sqrt3*cosbeta * 
(coslambda*cospsi*cospsi -coslambda*sinpsi*sinpsi + 2*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn1Hn1crosssin[1] = -3./8 * (coslambda*coslambda*cospsi*cospsi*sinbeta + sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -coslambda*coslambda*sinbeta*sinpsi*sinpsi -cospsi*cospsi*sinbeta*sinlambda*sinlambda + 3*coslambda*cospsi*sinlambda*sinpsi + coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi -cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi); /**/ coeffn1Hn1crosssin[2] = -1./8*sqrt3*cosbeta * (coslambda*cospsi*cospsi -coslambda*sinpsi*sinpsi + 2*cospsi*sinbeta*sinlambda*sinpsi); /**/ coeffn1Hn1crosssin[3] = 1./16 * (coslambda*coslambda*sinbeta*sinpsi*sinpsi + cospsi*cospsi*sinbeta*sinlambda*sinlambda -coslambda*coslambda*cospsi*cospsi*sinbeta -sinbeta*sinlambda*sinlambda*sinpsi*sinpsi -3*coslambda*cospsi*sinlambda*sinpsi + cosbeta*cosbeta*coslambda*cospsi*sinlambda*sinpsi -coslambda*cospsi*sinbeta*sinbeta*sinlambda*sinpsi); /* Coefficients in k.n3 */ /**/ coeffkn3const = 3./8*cosbeta * (sinlambda -sqrt3*coslambda); /**/ coeffkn3cos[0] = 3./4 * (-sinbeta); /**/ coeffkn3cos[1] = -1./8*cosbeta * (-sinlambda -sqrt3*coslambda); /**/ coeffkn3sin[0] = -1./4*sqrt3 * (-sinbeta); /**/ coeffkn3sin[1] = 1./8*cosbeta * (-coslambda + sqrt3*sinlambda); /* Coefficients in k.n2 */ /**/ coeffkn2const = -3./8*cosbeta * (-sinlambda -sqrt3*coslambda); /**/ coeffkn2cos[0] = -3./4 * (-sinbeta); /**/ coeffkn2cos[1] = 1./8*cosbeta * (sinlambda -sqrt3*coslambda); /**/ coeffkn2sin[0] = -1./4*sqrt3 * (-sinbeta); /**/ coeffkn2sin[1] = 1./8*cosbeta * (-coslambda -sqrt3*sinlambda); /* Coefficients in k.n1 */ /**/ coeffkn1const = 3./4*cosbeta * (-sinlambda); /**/ coeffkn1cos[0] = 0. 
; /**/ coeffkn1cos[1] = 1./4*cosbeta * (-sinlambda); /**/ coeffkn1sin[0] = 1./2*sqrt3 * (-sinbeta); /**/ coeffkn1sin[1] = -1./4*cosbeta * (-coslambda); /* Coefficients in k.(p1+p2) */ /**/ coeffkp1plusp2const = -1./8*cosbeta * (-3*sinlambda -sqrt3*coslambda); /**/ coeffkp1plusp2cos[0] = -1./4 * (-sinbeta); /**/ coeffkp1plusp2cos[1] = 1./24*cosbeta * (3*sinlambda -sqrt3*coslambda); /**/ coeffkp1plusp2sin[0] = -1./4*sqrt3 * (-sinbeta); /**/ coeffkp1plusp2sin[1] = 1./24*cosbeta * (-3*coslambda -sqrt3*sinlambda); /* Coefficients in k.(p2+p3) */ /**/ coeffkp2plusp3const = 1./4*sqrt3*cosbeta * (-coslambda); /**/ coeffkp2plusp3cos[0] = 1./2 * (-sinbeta); /**/ coeffkp2plusp3cos[1] = -1./4/sqrt3 * (-cosbeta*coslambda); /**/ coeffkp2plusp3sin[0] = 0. ; /**/ coeffkp2plusp3sin[1] = -1./4/sqrt3 * (-cosbeta*sinlambda); /* Coefficients in k.(p3+p1) */ /**/ coeffkp3plusp1const = -1./8*cosbeta * (3*sinlambda -sqrt3*coslambda); /**/ coeffkp3plusp1cos[0] = -1./4 * (-sinbeta); /**/ coeffkp3plusp1cos[1] = 1./24*cosbeta * (-3*sinlambda -sqrt3*coslambda); /**/ coeffkp3plusp1sin[0] = 1./4*sqrt3 * (-sinbeta); /**/ coeffkp3plusp1sin[1] = -1./24*cosbeta * (-3*coslambda + sqrt3*sinlambda); /* Coefficients in k.p1 */ /**/ coeffkp1const = -1./4*sqrt3 * (-cosbeta*coslambda); /**/ coeffkp1cos[0] = -1./2 * (-sinbeta); /**/ coeffkp1cos[1] = 1./(4*sqrt3) * (-cosbeta*coslambda); /**/ coeffkp1sin[0] = 0. 
; /**/ coeffkp1sin[1] = 1./(4*sqrt3) * (-cosbeta*sinlambda); /* Coefficients in k.p2 */ /**/ coeffkp2const = 1./8*cosbeta * (3*sinlambda -sqrt3*coslambda); /**/ coeffkp2cos[0] = 1./4 * (-sinbeta); /**/ coeffkp2cos[1] = -1./24*cosbeta * (-3*sinlambda -sqrt3*coslambda); /**/ coeffkp2sin[0] = -1./4*sqrt3 * (-sinbeta); /**/ coeffkp2sin[1] = 1./24*cosbeta * (-3*coslambda + sqrt3*sinlambda); /* Coefficients in k.p3 */ /**/ coeffkp3const = 1./8*cosbeta * (-3*sinlambda -sqrt3*coslambda); /**/ coeffkp3cos[0] = 1./4 * (-sinbeta); /**/ coeffkp3cos[1] = -1./24*cosbeta * (3*sinlambda -sqrt3*coslambda); /**/ coeffkp3sin[0] = 1./4*sqrt3 * (-sinbeta); /**/ coeffkp3sin[1] = -1./24*cosbeta * (-3*coslambda -sqrt3*sinlambda); /* Coefficients in k.R */ /**/ coeffkRconst = 0.; coeffkRcos[0] = 1. * (-cosbeta*coslambda); coeffkRsin[0] = 1. * (-cosbeta*sinlambda); coeffkRcos[1] = 0.; coeffkRsin[1] = 0.; } /*********************** Fourier-domain response ************************/ /* Individual functions GABmode: older version, does not include the orbital delay (was treated separately as Bessel phase) */ /* Collective function EvaluateGABmode: orbital delay included */ /* Conventions changed: now MLDC conventions */ /* Function evaluating G21, combining the two polarization with the spherical harmonics factors */ double complex G21mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross) { double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } double n3Pn3plus = coeffn3Hn3plusconst; double n3Pn3cross = coeffn3Hn3crossconst; for(int j=0; j<4; j++) { n3Pn3plus += cosarray[j] * coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j]; n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * coeffn3Hn3crosssin[j]; } double kn3 = coeffkn3const; double kp1plusp2 = coeffkp1plusp2const; for(int j=0; j<2; j++) { 
kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j]; kp1plusp2 += cosarray[j] * coeffkp1plusp2cos[j] + sinarray[j] * coeffkp1plusp2sin[j]; } return I*PI*f*variant->ConstL/C_SI * (n3Pn3plus*Yfactorplus + n3Pn3cross*Yfactorcross) * sinc( PI*f*variant->ConstL/C_SI * (1.+kn3)) * cexp( I*PI*f*variant->ConstL/C_SI * (1.+kp1plusp2) ); } /* Function evaluating G12, combining the two polarization with the spherical harmonics factors */ double complex G12mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross) { double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } double n3Pn3plus = coeffn3Hn3plusconst; double n3Pn3cross = coeffn3Hn3crossconst; for(int j=0; j<4; j++) { n3Pn3plus += cosarray[j] * coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j]; n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * coeffn3Hn3crosssin[j]; } double kn3 = coeffkn3const; double kp1plusp2 = coeffkp1plusp2const; for(int j=0; j<2; j++) { kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j]; kp1plusp2 += cosarray[j] * coeffkp1plusp2cos[j] + sinarray[j] * coeffkp1plusp2sin[j]; } return I*PI*f*variant->ConstL/C_SI * (n3Pn3plus*Yfactorplus + n3Pn3cross*Yfactorcross) * sinc( PI*f*variant->ConstL/C_SI * (1.-kn3)) * cexp( I*PI*f*variant->ConstL/C_SI * (1.+kp1plusp2) ); } /* Function evaluating G32, combining the two polarization with the spherical harmonics factors */ double complex G32mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross) { double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } double n1Pn1plus = coeffn1Hn1plusconst; double n1Pn1cross = coeffn1Hn1crossconst; for(int j=0; j<4; j++) { 
n1Pn1plus += cosarray[j] * coeffn1Hn1pluscos[j] + sinarray[j] * coeffn1Hn1plussin[j]; n1Pn1cross += cosarray[j] * coeffn1Hn1crosscos[j] + sinarray[j] * coeffn1Hn1crosssin[j]; } double kn1 = coeffkn1const; double kp2plusp3 = coeffkp2plusp3const; for(int j=0; j<2; j++) { kn1 += cosarray[j] * coeffkn1cos[j] + sinarray[j] * coeffkn1sin[j]; kp2plusp3 += cosarray[j] * coeffkp2plusp3cos[j] + sinarray[j] * coeffkp2plusp3sin[j]; } return I*PI*f*variant->ConstL/C_SI * (n1Pn1plus*Yfactorplus + n1Pn1cross*Yfactorcross) * sinc( PI*f*variant->ConstL/C_SI * (1.+kn1)) * cexp( I*PI*f*variant->ConstL/C_SI * (1.+kp2plusp3) ); } /* Function evaluating G23, combining the two polarization with the spherical harmonics factors */ double complex G23mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross) { double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1)* phase); sinarray[j] = sin((j+1)* phase); } double n1Pn1plus = coeffn1Hn1plusconst; double n1Pn1cross = coeffn1Hn1crossconst; for(int j=0; j<4; j++) { n1Pn1plus += cosarray[j] * coeffn1Hn1pluscos[j] + sinarray[j] * coeffn1Hn1plussin[j]; n1Pn1cross += cosarray[j] * coeffn1Hn1crosscos[j] + sinarray[j] * coeffn1Hn1crosssin[j]; } double kn1 = coeffkn1const; double kp2plusp3 = coeffkp2plusp3const; for(int j=0; j<2; j++) { kn1 += cosarray[j] * coeffkn1cos[j] + sinarray[j] * coeffkn1sin[j]; kp2plusp3 += cosarray[j] * coeffkp2plusp3cos[j] + sinarray[j] * coeffkp2plusp3sin[j]; } return I*PI*f*variant->ConstL/C_SI * (n1Pn1plus*Yfactorplus + n1Pn1cross*Yfactorcross) * sinc( PI*f*variant->ConstL/C_SI * (1.-kn1)) * cexp( I*PI*f*variant->ConstL/C_SI * (1.+kp2plusp3) ); } /* Function evaluating G13, combining the two polarization with the spherical harmonics factors */ double complex G13mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex 
Yfactorcross) { double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } double n2Pn2plus = coeffn2Hn2plusconst; double n2Pn2cross = coeffn2Hn2crossconst; for(int j=0; j<4; j++) { n2Pn2plus += cosarray[j] * coeffn2Hn2pluscos[j] + sinarray[j] * coeffn2Hn2plussin[j]; n2Pn2cross += cosarray[j] * coeffn2Hn2crosscos[j] + sinarray[j] * coeffn2Hn2crosssin[j]; } double kn2 = coeffkn2const; double kp3plusp1 = coeffkp3plusp1const; for(int j=0; j<2; j++) { kn2 += cosarray[j] * coeffkn2cos[j] + sinarray[j] * coeffkn2sin[j]; kp3plusp1 += cosarray[j] * coeffkp3plusp1cos[j] + sinarray[j] * coeffkp3plusp1sin[j]; } return I*PI*f*variant->ConstL/C_SI * (n2Pn2plus*Yfactorplus + n2Pn2cross*Yfactorcross) * sinc( PI*f*variant->ConstL/C_SI * (1.+kn2)) * cexp( I*PI*f*variant->ConstL/C_SI * (1.+kp3plusp1) ); } /* Function evaluating G31, combining the two polarization with the spherical harmonics factors */ double complex G31mode(const LISAconstellation *variant, const double f, const double t, const double complex Yfactorplus, const double complex Yfactorcross) { double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } double n2Pn2plus = coeffn2Hn2plusconst; double n2Pn2cross = coeffn2Hn2crossconst; for(int j=0; j<4; j++) { n2Pn2plus += cosarray[j] * coeffn2Hn2pluscos[j] + sinarray[j] * coeffn2Hn2plussin[j]; n2Pn2cross += cosarray[j] * coeffn2Hn2crosscos[j] + sinarray[j] * coeffn2Hn2crosssin[j]; } double kn2 = coeffkn2const; double kp3plusp1 = coeffkp3plusp1const; for(int j=0; j<2; j++) { kn2 += cosarray[j] * coeffkn2cos[j] + sinarray[j] * coeffkn2sin[j]; kp3plusp1 += cosarray[j] * coeffkp3plusp1cos[j] + sinarray[j] * coeffkp3plusp1sin[j]; } return I*PI*f*variant->ConstL/C_SI * (n2Pn2plus*Yfactorplus + n2Pn2cross*Yfactorcross) * sinc( PI*f*variant->ConstL/C_SI * (1.-kn2)) * cexp( 
I*PI*f*variant->ConstL/C_SI * (1.+kp3plusp1) ); } /* Function evaluating all coefficients G12, G21, G23, G32, G31, G13, combining the two polarization with the spherical harmonics factors */ /* Note: includes orbital delay */ int EvaluateGABmode( const LISAconstellation *variant, /* Description of LISA variant */ double complex* G12, /* Output for G12 */ double complex* G21, /* Output for G21 */ double complex* G23, /* Output for G23 */ double complex* G32, /* Output for G32 */ double complex* G31, /* Output for G31 */ double complex* G13, /* Output for G13 */ const double f, /* Frequency */ const double t, /* Time */ const double complex Yfactorplus, /* Spin-weighted spherical harmonic factor for plus */ const double complex Yfactorcross, /* Spin-weighted spherical harmonic factor for cross */ const int tagdelayR, /* Tag: when 1, include the phase term of the R-delay */ const ResponseApproxtag responseapprox) /* Tag to select possible low-f approximation level in FD response */ { double phase = variant->ConstOmega*t + variant->ConstPhi0; /* Precompute array of sine/cosine */ for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar products with k */ double n1Pn1plus = coeffn1Hn1plusconst; double n1Pn1cross = coeffn1Hn1crossconst; double n2Pn2plus = coeffn2Hn2plusconst; double n2Pn2cross = coeffn2Hn2crossconst; double n3Pn3plus = coeffn3Hn3plusconst; double n3Pn3cross = coeffn3Hn3crossconst; for(int j=0; j<4; j++) { n1Pn1plus += cosarray[j] * coeffn1Hn1pluscos[j] + sinarray[j] * coeffn1Hn1plussin[j]; n1Pn1cross += cosarray[j] * coeffn1Hn1crosscos[j] + sinarray[j] * coeffn1Hn1crosssin[j]; n2Pn2plus += cosarray[j] * coeffn2Hn2pluscos[j] + sinarray[j] * coeffn2Hn2plussin[j]; n2Pn2cross += cosarray[j] * coeffn2Hn2crosscos[j] + sinarray[j] * coeffn2Hn2crosssin[j]; n3Pn3plus += cosarray[j] * coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j]; n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * 
coeffn3Hn3crosssin[j]; } /* Scalar products with k */ double kn1 = coeffkn1const; double kn2 = coeffkn2const; double kn3 = coeffkn3const; double kp1plusp2 = coeffkp1plusp2const; double kp2plusp3 = coeffkp2plusp3const; double kp3plusp1 = coeffkp3plusp1const; double kR = coeffkRconst; for(int j=0; j<2; j++) { kn1 += cosarray[j] * coeffkn1cos[j] + sinarray[j] * coeffkn1sin[j]; kn2 += cosarray[j] * coeffkn2cos[j] + sinarray[j] * coeffkn2sin[j]; kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j]; kp1plusp2 += cosarray[j] * coeffkp1plusp2cos[j] + sinarray[j] * coeffkp1plusp2sin[j]; kp2plusp3 += cosarray[j] * coeffkp2plusp3cos[j] + sinarray[j] * coeffkp2plusp3sin[j]; kp3plusp1 += cosarray[j] * coeffkp3plusp1cos[j] + sinarray[j] * coeffkp3plusp1sin[j]; kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factors */ double complex factn1Pn1 = n1Pn1plus*Yfactorplus + n1Pn1cross*Yfactorcross; double complex factn2Pn2 = n2Pn2plus*Yfactorplus + n2Pn2cross*Yfactorcross; double complex factn3Pn3 = n3Pn3plus*Yfactorplus + n3Pn3cross*Yfactorcross; double prefactor = PI*f*variant->ConstL/C_SI; double prefactorR = 2*PI*f*variant->OrbitR/C_SI; double complex factorcexp12 = cexp(I*prefactor * (1.+kp1plusp2)); double complex factorcexp23 = cexp(I*prefactor * (1.+kp2plusp3)); double complex factorcexp31 = cexp(I*prefactor * (1.+kp3plusp1)); double factorsinc12 = sinc( prefactor * (1.-kn3)); double factorsinc21 = sinc( prefactor * (1.+kn3)); double factorsinc23 = sinc( prefactor * (1.-kn1)); double factorsinc32 = sinc( prefactor * (1.+kn1)); double factorsinc31 = sinc( prefactor * (1.-kn2)); double factorsinc13 = sinc( prefactor * (1.+kn2)); /* The tag tagdelayR allows to choose to include or not the R-delay phase term (here leading order) */ double complex factorcexpkR; if(tagdelayR) factorcexpkR = cexp(I*prefactorR * kR); else factorcexpkR = 1.; /* Take into account level of approximation in for low-f response - choices are full, lowfL or lowf */ 
/* In the lowf approximation, also drop the orbital-delay phase factor */
if(responseapprox==lowf) {
  factorcexpkR = 1.;
}
/* In both lowfL and lowf approximations, drop the sinc transfer factors and
   the constellation-scale phase factors (long-wavelength limit) */
if((responseapprox==lowfL)||(responseapprox==lowf)) {
  factorsinc12 = 1.;
  factorsinc21 = 1.;
  factorsinc23 = 1.;
  factorsinc32 = 1.;
  factorsinc31 = 1.;
  factorsinc13 = 1.;
  factorcexp12 = 1.;
  factorcexp23 = 1.;
  factorcexp31 = 1.;
}
/* Output result */
/* Each GAB combines: i*pi*f*L/c prefactor, orbital-delay phase factor,
   antenna projection nA.P.nA for the arm, sinc transfer factor and the
   arm phase factor shared by the AB/BA pair */
*G12 = I*prefactor * factorcexpkR * factn3Pn3 * factorsinc12 * factorcexp12;
*G21 = I*prefactor * factorcexpkR * factn3Pn3 * factorsinc21 * factorcexp12;
*G23 = I*prefactor * factorcexpkR * factn1Pn1 * factorsinc23 * factorcexp23;
*G32 = I*prefactor * factorcexpkR * factn1Pn1 * factorsinc32 * factorcexp23;
*G31 = I*prefactor * factorcexpkR * factn2Pn2 * factorsinc31 * factorcexp31;
*G13 = I*prefactor * factorcexpkR * factn2Pn2 * factorsinc13 * factorcexp31;

return SUCCESS;
}

/*********************** Fourier-domain TDI factors ************************/

/* Functions evaluating the Fourier-domain factors (combinations of the GAB's) for TDI observables */
/* NOTE: factors have been scaled out, in parallel of what is done for the noise function */
/* Note: in case only one channel is considered, amplitudes for channels 2 and 3 are simply set to 0 */
/* (allows minimal changes from the old structure that assumed KTV A,E,T - but probably not optimal) */
int EvaluateTDIfactor3Chan(
  const LISAconstellation *variant,       /* Description of LISA variant */
  double complex* factor1,                /* Output for factor for TDI channel 1 */
  double complex* factor2,                /* Output for factor for TDI channel 2 */
  double complex* factor3,                /* Output for factor for TDI channel 3 */
  const double complex G12,               /* Input for G12 */
  const double complex G21,               /* Input for G21 */
  const double complex G23,               /* Input for G23 */
  const double complex G32,               /* Input for G32 */
  const double complex G31,               /* Input for G31 */
  const double complex G13,               /* Input for G13 */
  const double f,                         /* Frequency */
  const TDItag tditag,                    /* Selector for the TDI observables */
  const ResponseApproxtag responseapprox) /* Tag to select possible low-f
approximation level in FD response */
{
  /* Notation: x=pifL, z=e^2ix */
  double x = PI*f*variant->ConstL/C_SI;
  double complex z = cexp(2*I*x);
  /* In both lowf and lowf-L approximations, ignore z factors - consistently ignore all TDI delays */
  if((responseapprox==lowf)||(responseapprox==lowfL)) {
    x = 0.;
    z = 1.;
  }
  switch(tditag) {
  /* For testing purposes: basic yAB observable - no factor */
  case y12:
    *factor1 = G12; *factor2 = 0.; *factor3 = 0.; break;
  /* For testing purposes: basic yABL observable - no factor, same as for yAB */
  case y12L:
    *factor1 = G12; *factor2 = 0.; *factor3 = 0.; break;
  /* First-generation rescaled TDI aet from X,Y,Z */
  /* With x=pifL, factors scaled out: A,E I*sqrt2*sin2x*e2ix - T 2*sqrt2*sin2x*sinx*e3ix */
  case TDIAETXYZ:
    *factor1 = 0.5 * ( (1.+z)*(G31+G13) - G23 - z*G32 - G21 - z*G12 );
    *factor2 = 0.5*invsqrt3 * ( (1.-z)*(G13-G31) + (2.+z)*(G12-G32) + (1.+2*z)*(G21-G23) );
    *factor3 = invsqrt6 * ( G21-G12 + G32-G23 + G13-G31);
    break;
  /* First-generation rescaled TDI aet from alpha, beta, gamma */
  /* With x=pifL, factors scaled out: A,E -I*2sqrt2*sinx*eix - T sinx/(sin3x*eix) */
  /* NOTE(review): the (1.+z)*(G21+G13) term below breaks the cyclic pattern
     of the other combinations (G21+G23 would be expected); it matches the
     commented-out single-channel version later in this file, but should be
     verified against the TDI A(alpha,beta,gamma) definition */
  case TDIAETalphabetagamma:
    *factor1 = 0.5 * (G13+G31 + z*(G12+G32) - (1.+z)*(G21+G13));
    *factor2 = 0.5*invsqrt3 * ((2.+z)*(G12-G32) + (1.+z)*(G21-G23) + (1.+2*z)*(G13-G31));
    *factor3 = invsqrt3 * (G21-G12 + G32-G23 + G13-G31);
    break;
  /* First-generation TDI XYZ */
  /* With x=pifL, factor scaled out: 2I*sin2x*e2ix */
  case TDIXYZ:
    *factor1 = G21 + z*G12 - G31 - z*G13;
    *factor2 = G32 + z*G23 - G12 - z*G21;
    *factor3 = G13 + z*G31 - G23 - z*G32;
    break;
  /* First-generation TDI alpha beta gamma */
  case TDIalphabetagamma:
    *factor1 = G21-G31 + z*(G13-G12) + z*z*(G32-G23);
    *factor2 = G32-G12 + z*(G21-G23) + z*z*(G13-G31);
    *factor3 = G13-G23 + z*(G32-G31) + z*z*(G21-G12);
    break;
  /* First-generation TDI XYZ */
  case TDIX:
    *factor1 = G21 + z*G12 - G31 - z*G13; *factor2 = 0.; *factor3 = 0.;
    break;
  /* First-generation TDI alpha beta gamma */
  case TDIalpha:
    *factor1 = G21-G31 +
z*(G13-G12) + z*z*(G32-G23); *factor2 = 0.; *factor3 = 0.; break; /* First-generation rescaled TDI aet from X,Y,Z */ /* With x=pifL, factors scaled out: A,E I*sqrt2*sin2x*eix - T 2*sqrt2*sin2x*sinx*e2ix */ case TDIAXYZ: *factor1 = 0.5 * ( (1.+z)*(G31+G13) - G23 - z*G32 - G21 - z*G12 ); *factor2 = 0.; *factor3 = 0.; break; case TDIEXYZ: *factor1 = 0.5*invsqrt3 * ( (1.-z)*(G13-G31) + (2.+z)*(G12-G32) + (1.+2*z)*(G12-G23) ); *factor2 = 0.; *factor3 = 0.; break; case TDITXYZ: *factor1 = invsqrt6 * ( G21-G12 + G32-G23 + G13-G31); *factor2 = 0.; *factor3 = 0.; break; /* First-generation rescaled TDI aet from alpha, beta, gamma */ /* With x=pifL, factors scaled out: A,E -I*2sqrt2*sinx*eix - T sinx/(sin3x*eix) */ case TDIAalphabetagamma: *factor1 = 0.5 * (G13+G31 + z*(G12+G32) - (1.+z)*(G21+G13)); *factor2 = 0.; *factor3 = 0.; break; case TDIEalphabetagamma: *factor1 = 0.5*invsqrt3 * ((2.+z)*(G12-G32) + (1.+z)*(G21-G23) + (1.+2*z)*(G13-G31)); *factor2 = 0.; *factor3 = 0.; break; case TDITalphabetagamma: *factor1 = invsqrt3 * (G21-G12 + G32-G23 + G13-G31); *factor2 = 0.; *factor3 = 0.; break; default: printf("Error in EvaluateTDIfactor3Chan: tditag not recognized.\n"); exit(1); } return SUCCESS; } /* Function evaluating the Fourier-domain factors that have been scaled out of TDI observables */ /* The factors scaled out, parallel what is done for the noise functions */ /* Note: in case only one channel is considered, factors for channels 2 and 3 are simply set to 0 */ int ScaledTDIfactor3Chan( const LISAconstellation *variant, /* Description of LISA variant */ double complex* factor1, /* Output for factor for TDI factor 1 */ double complex* factor2, /* Output for factor for TDI factor 2 */ double complex* factor3, /* Output for factor for TDI factor 3 */ const double f, /* Frequency */ const TDItag tditag) /* Selector for the TDI observables */ { /* Notation: x=pifL */ double x = PI*f*variant->ConstL/C_SI; switch(tditag) { /* First-generation rescaled TDI aet from X,Y,Z */ 
case TDIAETXYZ: *factor1 = I*sqrt(2)*sin(2*x)*cexp(2*I*x); *factor2 = I*sqrt(2)*sin(2*x)*cexp(2*I*x); *factor3 = 2*sqrt(2)*sin(x)*sin(2*x)*cexp(3*I*x); break; /* First-generation rescaled TDI aet from alpha, beta, gamma */ case TDIAETalphabetagamma: *factor1 = -I*2*sqrt(2)*sin(x)*cexp(I*x); *factor2 = -I*2*sqrt(2)*sin(x)*cexp(I*x); *factor3 = sin(3*x)/sin(x)*cexp(I*x); break; /* First-generation TDI XYZ */ case TDIXYZ: *factor1 = 2*I*sin(2*x)*cexp(2*I*x); *factor2 = 2*I*sin(2*x)*cexp(2*I*x); *factor3 = 2*I*sin(2*x)*cexp(2*I*x); break; /* First-generation TDI alpha beta gamma */ case TDIalphabetagamma: *factor1 = 1.; *factor2 = 1.; *factor3 = 1.; break; /* First-generation TDI XYZ */ case TDIX: *factor1 = 2*I*sin(2*x)*cexp(2*I*x); *factor2 = 0.; *factor3 = 0.; break; /* First-generation TDI alpha beta gamma */ case TDIalpha: *factor1 = 1.; *factor2 = 0.; *factor3 = 0.; break; /* First-generation rescaled TDI aet from X,Y,Z */ /* With x=pifL, factors scaled out: A,E I*sqrt2*sin2x*eix - T 2*sqrt2*sin2x*sinx*e2ix */ case TDIAXYZ: *factor1 = I*sqrt(2)*sin(2*x)*cexp(2*I*x); *factor2 = 0.; *factor3 = 0.; break; case TDIEXYZ: *factor1 = I*sqrt(2)*sin(2*x)*cexp(2*I*x); *factor2 = 0.; *factor3 = 0.; break; case TDITXYZ: *factor1 = 2*sqrt(2)*sin(x)*sin(2*x)*cexp(3*I*x); *factor2 = 0.; *factor3 = 0.; break; /* First-generation rescaled TDI aet from alpha, beta, gamma */ /* With x=pifL, factors scaled out: A,E -I*2sqrt2*sinx*eix - T sinx/(sin3x*eix) */ case TDIAalphabetagamma: *factor1 = -I*2*sqrt(2)*sin(x)*cexp(I*x); *factor2 = 0.; *factor3 = 0.; break; case TDIEalphabetagamma: *factor1 = -I*2*sqrt(2)*sin(x)*cexp(I*x); *factor2 = 0.; *factor3 = 0.; break; case TDITalphabetagamma: *factor1 = sin(3*x)/sin(x)*cexp(I*x); *factor2 = 0.; *factor3 = 0.; break; default: printf("Error in EvaluateTDIfactor3Chan: tditag not recognized.\n"); exit(1); } return SUCCESS; } /* Function restoring the factor that have been scaled out of the TDI observables */ /* NOTE: the operation is made 
in-place, and the input is overwritten */
int RestoreInPlaceScaledFactorTDI(
  const LISAconstellation *variant,           /* Description of LISA variant */
  ListmodesCAmpPhaseFrequencySeries* listtdi, /* Output/Input: list of mode contributions to TDI observable */
  TDItag tditag,                              /* Tag selecting the TDI observable */
  int nchannel)                               /* TDI channel number */
{
  double complex factor1 = 0;
  double complex factor2 = 0;
  double complex factor3 = 0;
  double complex factor = 0; /* BUGFIX: was uninitialized; see default case below */
  double complex camp;
  ListmodesCAmpPhaseFrequencySeries* listelement = listtdi;
  /* Going through the list of modes */
  while(listelement) {
    gsl_vector* freq = listelement->freqseries->freq;
    gsl_vector* ampreal = listelement->freqseries->amp_real;
    gsl_vector* ampimag = listelement->freqseries->amp_imag;
    /* size_t index avoids the signed/unsigned comparison with freq->size */
    for(size_t i=0; i<freq->size; i++) {
      ScaledTDIfactor3Chan(variant, &factor1, &factor2, &factor3, gsl_vector_get(freq, i), tditag);
      switch(nchannel) {
        case 1: factor = factor1; break;
        case 2: factor = factor2; break;
        case 3: factor = factor3; break;
        default:
          /* BUGFIX: an invalid channel number previously fell through the
             switch and multiplied the data by an uninitialized factor (UB);
             fail loudly instead, consistent with the tag handling above */
          printf("Error in RestoreInPlaceScaledFactorTDI: nchannel not recognized.\n");
          exit(1);
      }
      /* Restore the scaled-out factor in place on the complex amplitude */
      camp = factor * (gsl_vector_get(ampreal, i) + I*gsl_vector_get(ampimag, i));
      gsl_vector_set(ampreal, i, creal(camp));
      gsl_vector_set(ampimag, i, cimag(camp));
    }
    listelement = listelement->next;
  }
  return SUCCESS;
}

/* Functions evaluating the Fourier-domain factors (combinations of the GAB's) for TDI observables */
/* int EvaluateTDIfactor1Chan( */
/* double complex* factor, /\* Output for factor for TDI channel *\/ */
/* const double complex G12, /\* Input for G12 *\/ */
/* const double complex G21, /\* Input for G21 *\/ */
/* const double complex G23, /\* Input for G23 *\/ */
/* const double complex G32, /\* Input for G32 *\/ */
/* const double complex G31, /\* Input for G31 *\/ */
/* const double complex G13, /\* Input for G13 *\/ */
/* const double f, /\* Frequency *\/ */
/* const TDItag tditag) /\* Selector for the TDI observables *\/ */
/* { */
/* /\* Notation: x=pifL, z = e^2ix*\/ */
/* double x = PI*f*variant->ConstL/C_SI; */
/* double complex z = cexp(2*I*x); */
/* double
sin2x = sin(2*x); */ /* double complex commonfac; */ /* switch(tditag) { */ /* /\* First-generation TDI XYZ *\/ */ /* case TDIX: { */ /* commonfac = 2*I*z*sin2x; */ /* *factor = commonfac * (G21 + z*G12 - G31 - z*G13); } */ /* case TDIY: { */ /* commonfac = 2*I*z*sin2x; */ /* *factor = commonfac * (G32 + z*G23 - G12 - z*G21); } */ /* case TDIZ: { */ /* commonfac = 2*I*z*sin2x; */ /* *factor = commonfac * (G13 + z*G31 - G23 - z*G32); } */ /* /\* First-generation TDI alpha beta gamma *\/ */ /* case TDIalpha: { */ /* *factor = G21-G31 + z*(G13-G12) + z*z*(G32-G23); } */ /* case TDIbeta: { */ /* *factor = G32-G12 + z*(G21-G23) + z*z*(G13-G31); } */ /* case TDIgamma: { */ /* *factor = G13-G23 + z*(G32-G31) + z*z*(G21-G12); } */ /* /\* First-generation rescaled TDI aet from X,Y,Z *\/ */ /* /\* With x=pifL, factors scaled out: A,E I*sqrt2*sin2x*eix - T 2*sqrt2*sin2x*sinx*e2ix *\/ */ /* case TDIAXYZ: { */ /* *factor = 0.5 * ( (1.+z)*(G31+G13) - G23 - z*G32 - G21 - z*G12 ); } */ /* case TDIEXYZ: { */ /* *factor = 0.5*invsqrt3 * ( (1.-z)*(G13-G31) + (2.+z)*(G12-G32) + (1.+2*z)*(G12-G23) ); } */ /* case TDITXYZ: { */ /* *factor = invsqrt6 * ( G21-G12 + G32-G23 + G13-G31); } */ /* /\* First-generation rescaled TDI aet from alpha, beta, gamma *\/ */ /* /\* With x=pifL, factors scaled out: A,E -I*2sqrt2*sinx*eix - T sinx/(sin3x*eix) *\/ */ /* case TDIAalphabetagamma: { */ /* *factor = 0.5 * (G13+G31 + z*(G12+G32) - (1.+z)*(G21+G13)); } */ /* case TDIEalphabetagamma: { */ /* *factor = 0.5*invsqrt3 * ((2+z)*(G12-G32) + (1+z)*(G21-G23) + (1.+2*z)*(G13-G31)); } */ /* case TDITalphabetagamma: { */ /* *factor = invsqrt3 * (G21-G12 + G32-G23 + G13-G31); } */ /* default: { */ /* printf("Error in EvaluateTDIfactor3Chan: tditag not recognized."); */ /* exit(1); } */ /* } */ /* } */ /*********************** Time-domain response ************************/ /* Processing single mode in amp/phase form through orbital time delay */ static double hOTDAmpPhase( const LISAconstellation *variant, /* 
Description of LISA variant */ double* amp, /* Output: amplitude */ double* phase, /* Output: phase */ gsl_spline* splineamp, /* Input spline for TD mode amplitude */ gsl_spline* splinephase, /* Input spline for TD mode phase */ gsl_interp_accel* accelamp, /* Accelerator for amp spline */ gsl_interp_accel* accelphase, /* Accelerator for phase spline */ const double t) /* Time */ { double tphase=variant->ConstOmega*t + variant->ConstPhi0; /* Precompute array of sine/cosine */ for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * tphase); sinarray[j] = sin((j+1) * tphase); } /* Scalar product k.R */ double kR = coeffkRconst; for(int j=0; j<2; j++) { kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factor and delay */ double delay = -(kR*variant->OrbitR)/C_SI; /* Output result */ *amp = gsl_spline_eval(splineamp, t+delay, accelamp); *phase = gsl_spline_eval(splinephase, t+delay, accelphase); } /* Functions evaluating yAB observables in time domain - constellation response only */ /* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */ static double y12LTDfromh22AmpPhase( const LISAconstellation *variant, /* Description of LISA variant */ gsl_spline* splineamp, /* Input spline for h22 TD amp */ gsl_spline* splinephase, /* Input spline for h22 TD phase */ gsl_interp_accel* accelamp, /* Accelerator for amp spline */ gsl_interp_accel* accelphase, /* Accelerator for phase spline */ double complex Y22, /* Y22 factor needed to convert h22 to hplus, hcross */ double complex Y2m2, /* Y2-2 factor needed to convert h2-2 to hplus, hcross */ const double t) /* Time */ { /* Precompute array of sine/cosine */ double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar products with k */ double n3Pn3plus = coeffn3Hn3plusconst; double n3Pn3cross = coeffn3Hn3crossconst; for(int j=0; j<4; j++) { n3Pn3plus += cosarray[j] * 
coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j]; n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * coeffn3Hn3crosssin[j]; } /* Scalar products with k */ double kn3 = coeffkn3const; double kp1 = coeffkp1const; double kp2 = coeffkp2const; for(int j=0; j<2; j++) { kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j]; kp1 += cosarray[j] * coeffkp1cos[j] + sinarray[j] * coeffkp1sin[j]; kp2 += cosarray[j] * coeffkp2cos[j] + sinarray[j] * coeffkp2sin[j]; } /* Common factor and delay */ double factorp = (1./(1.-kn3)) * 0.5*n3Pn3plus; double factorc = (1./(1.-kn3)) * 0.5*n3Pn3cross; double firstdelay = -((kp1 + 1)*variant->ConstL)/C_SI; double seconddelay = -(kp2*variant->ConstL)/C_SI; /* Values of Y22*h22 + Y2-2*h2-2 at 1 and 2 with delays, and hplus, hcross */ /* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */ double A22at1 = gsl_spline_eval(splineamp, t+firstdelay, accelamp); double phi22at1 = gsl_spline_eval(splinephase, t+firstdelay, accelphase); double A22at2 = gsl_spline_eval(splineamp, t+seconddelay, accelamp); double phi22at2 = gsl_spline_eval(splinephase, t+seconddelay, accelphase); double complex Y22h22at1 = Y22 * A22at1 * cexp(I*phi22at1); double complex Y22h22at2 = Y22 * A22at2 * cexp(I*phi22at2); double complex Y2m2h2m2at1 = Y2m2 * A22at1 * cexp(-I*phi22at1); double complex Y2m2h2m2at2 = Y2m2 * A22at2 * cexp(-I*phi22at2); double hp1 = creal(Y22h22at1 + Y2m2h2m2at1); double hc1 = -cimag(Y22h22at1 + Y2m2h2m2at1); double hp2 = creal(Y22h22at2 + Y2m2h2m2at2); double hc2 = -cimag(Y22h22at2 + Y2m2h2m2at2); /* Result */ double y12 = factorp*(hp1 - hp2) + factorc*(hc1 - hc2); return y12; } /* Functions evaluating yAB observables in time domain - orbital and constellation response */ /* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */ static double y12TDfromh22AmpPhase( const LISAconstellation *variant, /* Description of LISA variant */ 
gsl_spline* splineamp, /* Input spline for h22 TD amp */ gsl_spline* splinephase, /* Input spline for h22 TD phase */ gsl_interp_accel* accelamp, /* Accelerator for amp spline */ gsl_interp_accel* accelphase, /* Accelerator for phase spline */ double complex Y22, /* Y22 factor needed to convert h22 to hplus, hcross */ double complex Y2m2, /* Y2-2 factor needed to convert h2-2 to hplus, hcross */ const double t) /* Time */ { /* Precompute array of sine/cosine */ double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar product k.R */ double kR = coeffkRconst; for(int j=0; j<2; j++) { kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factor and delay */ double delay0 = -(kR*variant->OrbitR)/C_SI; /* Scalar products with k */ double n3Pn3plus = coeffn3Hn3plusconst; double n3Pn3cross = coeffn3Hn3crossconst; for(int j=0; j<4; j++) { n3Pn3plus += cosarray[j] * coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j]; n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * coeffn3Hn3crosssin[j]; } /* Scalar products with k */ double kn3 = coeffkn3const; double kp1 = coeffkp1const; double kp2 = coeffkp2const; for(int j=0; j<2; j++) { kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j]; kp1 += cosarray[j] * coeffkp1cos[j] + sinarray[j] * coeffkp1sin[j]; kp2 += cosarray[j] * coeffkp2cos[j] + sinarray[j] * coeffkp2sin[j]; } /* Common factor and delay */ double factorp = (1./(1.-kn3)) * 0.5*n3Pn3plus; double factorc = (1./(1.-kn3)) * 0.5*n3Pn3cross; double firstdelay = delay0 - ((kp1 + 1)*variant->ConstL)/C_SI; double seconddelay = delay0 - (kp2*variant->ConstL)/C_SI; /* Values of Y22*h22 + Y2-2*h2-2 at 1 and 2 with delays, and hplus, hcross */ /* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */ double A22at1 = gsl_spline_eval(splineamp, t+firstdelay, accelamp); double phi22at1 
= gsl_spline_eval(splinephase, t+firstdelay, accelphase); double A22at2 = gsl_spline_eval(splineamp, t+seconddelay, accelamp); double phi22at2 = gsl_spline_eval(splinephase, t+seconddelay, accelphase); double complex Y22h22at1 = Y22 * A22at1 * cexp(I*phi22at1); double complex Y22h22at2 = Y22 * A22at2 * cexp(I*phi22at2); double complex Y2m2h2m2at1 = Y2m2 * A22at1 * cexp(-I*phi22at1); double complex Y2m2h2m2at2 = Y2m2 * A22at2 * cexp(-I*phi22at2); double hp1 = creal(Y22h22at1 + Y2m2h2m2at1); double hc1 = -cimag(Y22h22at1 + Y2m2h2m2at1); double hp2 = creal(Y22h22at2 + Y2m2h2m2at2); double hc2 = -cimag(Y22h22at2 + Y2m2h2m2at2); /* Result */ double y12 = factorp*(hp1 - hp2) + factorc*(hc1 - hc2); return y12; } /* Functions evaluating yAB observables in time domain */ double y12TD( const LISAconstellation *variant, /* Description of LISA variant */ gsl_spline* splinehp, /* Input spline for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ const double t) /* Time */ { /* Precompute array of sine/cosine */ double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar products with k */ double n3Pn3plus = coeffn3Hn3plusconst; double n3Pn3cross = coeffn3Hn3crossconst; for(int j=0; j<4; j++) { n3Pn3plus += cosarray[j] * coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j]; n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * coeffn3Hn3crosssin[j]; } /* Scalar products with k */ double kn3 = coeffkn3const; double kp1 = coeffkp1const; double kp2 = coeffkp2const; double kR = coeffkRconst; for(int j=0; j<2; j++) { kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j]; kp1 += cosarray[j] * coeffkp1cos[j] + sinarray[j] * coeffkp1sin[j]; kp2 += cosarray[j] * coeffkp2cos[j] + sinarray[j] * coeffkp2sin[j]; kR += cosarray[j] 
* coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factor and delay */ double factorp = (1./(1.-kn3)) * 0.5*n3Pn3plus; double factorc = (1./(1.-kn3)) * 0.5*n3Pn3cross; double firstdelay = -(kR*variant->OrbitR + (kp1 + 1)*variant->ConstL)/C_SI; double seconddelay = -(kR*variant->OrbitR + kp2*variant->ConstL)/C_SI; /* Result */ double y12 = factorp*(gsl_spline_eval(splinehp, t+firstdelay, accelhp) - gsl_spline_eval(splinehp, t+seconddelay, accelhp)) + factorc*(gsl_spline_eval(splinehc, t+firstdelay, accelhc) - gsl_spline_eval(splinehc, t+seconddelay, accelhc)); return y12; } double y21TD( const LISAconstellation *variant, /* Description of LISA variant */ gsl_spline* splinehp, /* Input spline for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ const double t) /* Time */ { /* Precompute array of sine/cosine */ double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar products with k */ double n3Pn3plus = coeffn3Hn3plusconst; double n3Pn3cross = coeffn3Hn3crossconst; for(int j=0; j<4; j++) { n3Pn3plus += cosarray[j] * coeffn3Hn3pluscos[j] + sinarray[j] * coeffn3Hn3plussin[j]; n3Pn3cross += cosarray[j] * coeffn3Hn3crosscos[j] + sinarray[j] * coeffn3Hn3crosssin[j]; } /* Scalar products with k */ double kn3 = coeffkn3const; double kp1 = coeffkp1const; double kp2 = coeffkp2const; double kR = coeffkRconst; for(int j=0; j<2; j++) { kn3 += cosarray[j] * coeffkn3cos[j] + sinarray[j] * coeffkn3sin[j]; kp1 += cosarray[j] * coeffkp1cos[j] + sinarray[j] * coeffkp1sin[j]; kp2 += cosarray[j] * coeffkp2cos[j] + sinarray[j] * coeffkp2sin[j]; kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factor and delay */ double factorp = (1./(1.+kn3)) * 0.5*n3Pn3plus; double factorc = (1./(1.+kn3)) * 0.5*n3Pn3cross; double 
firstdelay = -(kR*variant->OrbitR + (kp2 + 1)*variant->ConstL)/C_SI; double seconddelay = -(kR*variant->OrbitR + kp1*variant->ConstL)/C_SI; /* Result */ double y21 = factorp*(gsl_spline_eval(splinehp, t+firstdelay, accelhp) - gsl_spline_eval(splinehp, t+seconddelay, accelhp)) + factorc*(gsl_spline_eval(splinehc, t+firstdelay, accelhc) - gsl_spline_eval(splinehc, t+seconddelay, accelhc)); return y21; } double y23TD( const LISAconstellation *variant, /* Description of LISA variant */ gsl_spline* splinehp, /* Input spline for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ const double t) /* Time */ { /* Precompute array of sine/cosine */ double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar products with k */ double n1Pn1plus = coeffn1Hn1plusconst; double n1Pn1cross = coeffn1Hn1crossconst; for(int j=0; j<4; j++) { n1Pn1plus += cosarray[j] * coeffn1Hn1pluscos[j] + sinarray[j] * coeffn1Hn1plussin[j]; n1Pn1cross += cosarray[j] * coeffn1Hn1crosscos[j] + sinarray[j] * coeffn1Hn1crosssin[j]; } /* Scalar products with k */ double kn1 = coeffkn1const; double kp2 = coeffkp2const; double kp3 = coeffkp3const; double kR = coeffkRconst; for(int j=0; j<2; j++) { kn1 += cosarray[j] * coeffkn1cos[j] + sinarray[j] * coeffkn1sin[j]; kp2 += cosarray[j] * coeffkp2cos[j] + sinarray[j] * coeffkp2sin[j]; kp3 += cosarray[j] * coeffkp3cos[j] + sinarray[j] * coeffkp3sin[j]; kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factor and delay */ double factorp = (1./(1.-kn1)) * 0.5*n1Pn1plus; double factorc = (1./(1.-kn1)) * 0.5*n1Pn1cross; double firstdelay = -(kR*variant->OrbitR + (kp2 + 1)*variant->ConstL)/C_SI; double seconddelay = -(kR*variant->OrbitR + kp3*variant->ConstL)/C_SI; /* Result */ double y23 = 
factorp*(gsl_spline_eval(splinehp, t+firstdelay, accelhp) - gsl_spline_eval(splinehp, t+seconddelay, accelhp)) + factorc*(gsl_spline_eval(splinehc, t+firstdelay, accelhc) - gsl_spline_eval(splinehc, t+seconddelay, accelhc)); return y23; } double y32TD( const LISAconstellation *variant, /* Description of LISA variant */ gsl_spline* splinehp, /* Input spline for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ const double t) /* Time */ { /* Precompute array of sine/cosine */ double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar products with k */ double n1Pn1plus = coeffn1Hn1plusconst; double n1Pn1cross = coeffn1Hn1crossconst; for(int j=0; j<4; j++) { n1Pn1plus += cosarray[j] * coeffn1Hn1pluscos[j] + sinarray[j] * coeffn1Hn1plussin[j]; n1Pn1cross += cosarray[j] * coeffn1Hn1crosscos[j] + sinarray[j] * coeffn1Hn1crosssin[j]; } /* Scalar products with k */ double kn1 = coeffkn1const; double kp2 = coeffkp2const; double kp3 = coeffkp3const; double kR = coeffkRconst; for(int j=0; j<2; j++) { kn1 += cosarray[j] * coeffkn1cos[j] + sinarray[j] * coeffkn1sin[j]; kp2 += cosarray[j] * coeffkp2cos[j] + sinarray[j] * coeffkp2sin[j]; kp3 += cosarray[j] * coeffkp3cos[j] + sinarray[j] * coeffkp3sin[j]; kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factor and delay */ double factorp = (1./(1.+kn1)) * 0.5*n1Pn1plus; double factorc = (1./(1.+kn1)) * 0.5*n1Pn1cross; double firstdelay = -(kR*variant->OrbitR + (kp3 + 1)*variant->ConstL)/C_SI; double seconddelay = -(kR*variant->OrbitR + kp2*variant->ConstL)/C_SI; /* Result */ double y32 = factorp*(gsl_spline_eval(splinehp, t+firstdelay, accelhp) - gsl_spline_eval(splinehp, t+seconddelay, accelhp)) + factorc*(gsl_spline_eval(splinehc, t+firstdelay, accelhc) - 
gsl_spline_eval(splinehc, t+seconddelay, accelhc)); return y32; } double y31TD( const LISAconstellation *variant, /* Description of LISA variant */ gsl_spline* splinehp, /* Input spline for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ const double t) /* Time */ { /* Precompute array of sine/cosine */ double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar products with k */ double n2Pn2plus = coeffn2Hn2plusconst; double n2Pn2cross = coeffn2Hn2crossconst; for(int j=0; j<4; j++) { n2Pn2plus += cosarray[j] * coeffn2Hn2pluscos[j] + sinarray[j] * coeffn2Hn2plussin[j]; n2Pn2cross += cosarray[j] * coeffn2Hn2crosscos[j] + sinarray[j] * coeffn2Hn2crosssin[j]; } /* Scalar products with k */ double kn2 = coeffkn2const; double kp3 = coeffkp3const; double kp1 = coeffkp1const; double kR = coeffkRconst; for(int j=0; j<2; j++) { kn2 += cosarray[j] * coeffkn2cos[j] + sinarray[j] * coeffkn2sin[j]; kp3 += cosarray[j] * coeffkp3cos[j] + sinarray[j] * coeffkp3sin[j]; kp1 += cosarray[j] * coeffkp1cos[j] + sinarray[j] * coeffkp1sin[j]; kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factor and delay */ double factorp = (1./(1.-kn2)) * 0.5*n2Pn2plus; double factorc = (1./(1.-kn2)) * 0.5*n2Pn2cross; double firstdelay = -(kR*variant->OrbitR + (kp3 + 1)*variant->ConstL)/C_SI; double seconddelay = -(kR*variant->OrbitR + kp1*variant->ConstL)/C_SI; /* Result */ double y31 = factorp*(gsl_spline_eval(splinehp, t+firstdelay, accelhp) - gsl_spline_eval(splinehp, t+seconddelay, accelhp)) + factorc*(gsl_spline_eval(splinehc, t+firstdelay, accelhc) - gsl_spline_eval(splinehc, t+seconddelay, accelhc)); return y31; } double y13TD( const LISAconstellation *variant, /* Description of LISA variant */ gsl_spline* splinehp, /* Input 
spline for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ const double t) /* Time */ { /* Precompute array of sine/cosine */ double phase=variant->ConstOmega*t + variant->ConstPhi0; for(int j=0; j<4; j++) { cosarray[j] = cos((j+1) * phase); sinarray[j] = sin((j+1) * phase); } /* Scalar products with k */ double n2Pn2plus = coeffn2Hn2plusconst; double n2Pn2cross = coeffn2Hn2crossconst; for(int j=0; j<4; j++) { n2Pn2plus += cosarray[j] * coeffn2Hn2pluscos[j] + sinarray[j] * coeffn2Hn2plussin[j]; n2Pn2cross += cosarray[j] * coeffn2Hn2crosscos[j] + sinarray[j] * coeffn2Hn2crosssin[j]; } /* Scalar products with k */ double kn2 = coeffkn2const; double kp3 = coeffkp3const; double kp1 = coeffkp1const; double kR = coeffkRconst; for(int j=0; j<2; j++) { kn2 += cosarray[j] * coeffkn2cos[j] + sinarray[j] * coeffkn2sin[j]; kp3 += cosarray[j] * coeffkp3cos[j] + sinarray[j] * coeffkp3sin[j]; kp1 += cosarray[j] * coeffkp1cos[j] + sinarray[j] * coeffkp1sin[j]; kR += cosarray[j] * coeffkRcos[j] + sinarray[j] * coeffkRsin[j]; } /* Common factor and delay */ double factorp = (1./(1.+kn2)) * 0.5*n2Pn2plus; double factorc = (1./(1.+kn2)) * 0.5*n2Pn2cross; double firstdelay = -(kR*variant->OrbitR + (kp1 + 1)*variant->ConstL)/C_SI; double seconddelay = -(kR*variant->OrbitR + kp3*variant->ConstL)/C_SI; /* Result */ double y13 = factorp*(gsl_spline_eval(splinehp, t+firstdelay, accelhp) - gsl_spline_eval(splinehp, t+seconddelay, accelhp)) + factorc*(gsl_spline_eval(splinehc, t+firstdelay, accelhc) - gsl_spline_eval(splinehc, t+seconddelay, accelhc)); return y13; } /**/ int EvaluateTDIXYZTD( const LISAconstellation *variant, /* Description of LISA variant */ double* TDIX, /* Output: value of TDI observable X */ double* TDIY, /* Output: value of TDI observable Y */ double* TDIZ, /* Output: value of TDI observable Z */ gsl_spline* splinehp, /* Input spline 
for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ const double t) /* Time */ { double armdelay = variant->ConstL/C_SI; double X = (y31TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y13TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) + (y21TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y12TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)) - (y21TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y12TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) - (y31TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y13TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)); double Y = (y12TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y21TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) + (y32TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y23TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)) - (y32TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y23TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) - (y12TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y21TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)); double Z = (y23TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y32TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) + (y13TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y31TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)) - (y13TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y31TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) - (y23TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y32TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)); /* Output */ *TDIX = X; *TDIY = Y; *TDIZ = Z; return SUCCESS; } /**/ int 
EvaluateTDIAETXYZTD( const LISAconstellation *variant, /* Description of LISA variant */ double* TDIA, /* Output: value of TDI observable X */ double* TDIE, /* Output: value of TDI observable Y */ double* TDIT, /* Output: value of TDI observable Z */ gsl_spline* splinehp, /* Input spline for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ const double t) /* Time */ { double armdelay = variant->ConstL/C_SI; double X = (y31TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y13TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) + (y21TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y12TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)) - (y21TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y12TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) - (y31TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y13TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)); double Y = (y12TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y21TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) + (y32TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y23TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)) - (y32TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y23TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) - (y12TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y21TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)); double Z = (y23TD(variant, splinehp, splinehc, accelhp, accelhc, t) + y32TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) + (y13TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y31TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)) - (y13TD(variant, splinehp, splinehc, accelhp, accelhc, t) 
+ y31TD(variant, splinehp, splinehc, accelhp, accelhc, t - armdelay)) - (y23TD(variant, splinehp, splinehc, accelhp, accelhc, t - 2*armdelay) + y32TD(variant, splinehp, splinehc, accelhp, accelhc, t - 3*armdelay)); /* Output */ *TDIA = 1./(2*sqrt(2)) * (Z-X); *TDIE = 1./(2*sqrt(6)) * (X-2*Y+Z); *TDIT = 1./(2*sqrt(3)) * (X+Y+Z); return SUCCESS; } /**/ int GenerateTDITD3Chanhphc( const LISAconstellation *variant, /* Description of LISA variant */ RealTimeSeries** TDI1, /* Output: real time series for TDI channel 1 */ RealTimeSeries** TDI2, /* Output: real time series for TDI channel 2 */ RealTimeSeries** TDI3, /* Output: real time series for TDI channel 3 */ gsl_spline* splinehp, /* Input spline for TD hplus */ gsl_spline* splinehc, /* Input spline for TD hcross */ gsl_interp_accel* accelhp, /* Accelerator for hp spline */ gsl_interp_accel* accelhc, /* Accelerator for hc spline */ gsl_vector* times, /* Vector of times to evaluate */ int nbptmargin, /* Margin set to 0 on both side to avoid problems with delays out of the domain */ TDItag tditag) /* Tag selecting the TDI observables */ { /* Initialize output */ int nbpt = times->size; RealTimeSeries_Init(TDI1, nbpt); RealTimeSeries_Init(TDI2, nbpt); RealTimeSeries_Init(TDI3, nbpt); gsl_vector_memcpy((*TDI1)->times, times); gsl_vector_memcpy((*TDI2)->times, times); gsl_vector_memcpy((*TDI3)->times, times); gsl_vector_set_zero((*TDI1)->h); gsl_vector_set_zero((*TDI2)->h); gsl_vector_set_zero((*TDI3)->h); /* Loop over time samples - we take a margin to avoid problems with the domain */ double t; double* tval = times->data; double* tdi1 = (*TDI1)->h->data; double* tdi2 = (*TDI2)->h->data; double* tdi3 = (*TDI3)->h->data; double tdi1val = 0, tdi2val = 0, tdi3val = 0; /* For testing purposes: basic observable yAB */ if(tditag==y12) { for(int i=nbptmargin; i<nbpt-nbptmargin; i++) { t = tval[i]; tdi1[i] = y12TD(variant, splinehp, splinehc, accelhp, accelhc, t); tdi2[i] = 0.; tdi3[i] = 0.; } } else if(tditag==TDIXYZ) { for(int 
i=nbptmargin; i<nbpt-nbptmargin; i++) { t = tval[i]; EvaluateTDIXYZTD(variant, &tdi1val, &tdi2val, &tdi3val, splinehp, splinehc, accelhp, accelhc, t); tdi1[i] = tdi1val; tdi2[i] = tdi2val; tdi3[i] = tdi3val; } } else if(tditag==TDIAETXYZ) { for(int i=nbptmargin; i<nbpt-nbptmargin; i++) { t = tval[i]; EvaluateTDIAETXYZTD(variant, &tdi1val, &tdi2val, &tdi3val, splinehp, splinehc, accelhp, accelhc, t); tdi1[i] = tdi1val; tdi2[i] = tdi2val; tdi3[i] = tdi3val; } } else { printf("Error: in GenerateTDITD3Chan, TDI tag not recognized.\n"); } return SUCCESS; } /* Generate hO orbital-delayed for one mode contribution from amp, phase */ int Generateh22TDO( const LISAconstellation *variant, /* Description of LISA variant */ AmpPhaseTimeSeries** h22tdO, /* Output: amp/phase time series for h22TDO */ gsl_spline* splineamp, /* Input spline for TD mode amplitude */ gsl_spline* splinephase, /* Input spline for TD mode phase */ gsl_interp_accel* accelamp, /* Accelerator for amp spline */ gsl_interp_accel* accelphase, /* Accelerator for phase spline */ gsl_vector* times, /* Vector of times to evaluate */ int nbptmargin) /* Margin set to 0 on both side to avoid problems with delays out of the domain */ { /* Initialize output */ int nbpt = times->size; AmpPhaseTimeSeries_Init(h22tdO, nbpt); gsl_vector_memcpy((*h22tdO)->times, times); gsl_vector_set_zero((*h22tdO)->h_amp); gsl_vector_set_zero((*h22tdO)->h_phase); /* Loop over time samples - we take a margin to avoid problems with the domain */ double t; double* tval = times->data; double* amp = (*h22tdO)->h_amp->data; double* phase = (*h22tdO)->h_phase->data; /* Loop over time samples */ for(int i=nbptmargin; i<nbpt-nbptmargin; i++) { t = tval[i]; hOTDAmpPhase(variant,&(amp[i]), &(phase[i]), splineamp, splinephase, accelamp, accelphase, t); } return SUCCESS; } /* Generate y12L from orbital-delayed h22 in amp/phase form */ /* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */ /* BEWARE: this 
ignores the fact that processing through orbital delay breaks the h2-2 = h22* symmetry */ int Generatey12LTD( const LISAconstellation *variant, /* Description of LISA variant */ RealTimeSeries** y12Ltd, /* Output: real time series for y12L */ gsl_spline* splineamp, /* Input spline for h22 TD amplitude */ gsl_spline* splinephase, /* Input spline for h22 TD phase */ gsl_interp_accel* accelamp, /* Accelerator for h22 amp spline */ gsl_interp_accel* accelphase, /* Accelerator for h22 phase spline */ gsl_vector* times, /* Vector of times to evaluate */ double Theta, /* Inclination */ double Phi, /* Phase */ int nbptmargin) /* Margin set to 0 on both side to avoid problems with delays out of the domain */ { /* Initialize output */ int nbpt = times->size; RealTimeSeries_Init(y12Ltd, nbpt); gsl_vector_memcpy((*y12Ltd)->times, times); gsl_vector_set_zero((*y12Ltd)->h); /* Spin-weighted spherical harmonic Y22 and Y2-2 */ double complex Y22 = SpinWeightedSphericalHarmonic(Theta, Phi, -2, 2, 2); double complex Y2m2 = SpinWeightedSphericalHarmonic(Theta, Phi, -2, 2, -2); /* Loop over time samples - we take a margin to avoid problems with the domain */ double t; double* tval = times->data; double* y12val = (*y12Ltd)->h->data; /* Loop over time samples */ for(int i=nbptmargin; i<nbpt-nbptmargin; i++) { t = tval[i]; y12val[i] = y12LTDfromh22AmpPhase(variant, splineamp, splinephase, accelamp, accelphase, Y22, Y2m2, t); } return SUCCESS; } /* Generate y12 from original h22 in amp/phase form, including both */ /* Here no approximation made as to the decomposition of the response in two steps, all the response is evaluated at once */ /* Note: includes both h22 and h2m2 contributions, assuming planar orbits so that h2-2 = h22* */ int Generatey12TD( const LISAconstellation *variant, /* Description of LISA variant */ RealTimeSeries** y12td, /* Output: real time series for y12L */ gsl_spline* splineamp, /* Input spline for h22 TD amplitude */ gsl_spline* splinephase, /* Input spline for 
h22 TD phase */ gsl_interp_accel* accelamp, /* Accelerator for h22 amp spline */ gsl_interp_accel* accelphase, /* Accelerator for h22 phase spline */ gsl_vector* times, /* Vector of times to evaluate */ double Theta, /* Inclination */ double Phi, /* Phase */ int nbptmargin) /* Margin set to 0 on both side to avoid problems with delays out of the domain */ { /* Initialize output */ int nbpt = times->size; RealTimeSeries_Init(y12td, nbpt); gsl_vector_memcpy((*y12td)->times, times); gsl_vector_set_zero((*y12td)->h); /* Spin-weighted spherical harmonic Y22 and Y2-2 */ double complex Y22 = SpinWeightedSphericalHarmonic(Theta, Phi, -2, 2, 2); double complex Y2m2 = SpinWeightedSphericalHarmonic(Theta, Phi, -2, 2, -2); /* Loop over time samples - we take a margin to avoid problems with the domain */ double t; double* tval = times->data; double* y12val = (*y12td)->h->data; /* Loop over time samples */ for(int i=nbptmargin; i<nbpt-nbptmargin; i++) { t = tval[i]; y12val[i] = y12TDfromh22AmpPhase(variant, splineamp, splinephase, accelamp, accelphase, Y22, Y2m2, t); } return SUCCESS; }
/* ===== threshold.c (ImageMagick MagickCore) ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/property.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/xml-tree.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image, % const size_t width,const size_t height, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o offset: the mean offset. % % o exception: return any errors or warnings in this structure. 
% */
/*
  Per-pixel adaptive threshold: each pixel is compared against the mean of
  its width x height neighborhood (plus `offset`), maintained incrementally
  as a sliding window along each row.
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const ssize_t offset,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    number_pixels;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  /* Degenerate neighborhood: return the unthresholded clone unchanged */
  if ((width == 0) || (height == 0))
    return(threshold_image);
  if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&threshold_image->exception);
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Local adaptive threshold.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  number_pixels=(MagickRealType) (width*height);
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      channel_bias,
      channel_sum;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict threshold_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      u,
      v;

    if (status == MagickFalse)
      continue;
    /* Virtual pixels: window centered on the row, half the window on each
       side; the virtual cache view supplies out-of-bounds pixels */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      height/2L,image->columns+width,height,exception);
    q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view);
    /* Seed the window: channel_sum accumulates the whole window,
       channel_bias the last column (subtracted when the window slides) */
    channel_bias=zero;
    channel_sum=zero;
    r=p;
    for (v=0; v < (ssize_t) height; v++)
    {
      for (u=0; u < (ssize_t) width; u++)
      {
        if (u == (ssize_t) (width-1))
          {
            channel_bias.red+=r[u].red;
            channel_bias.green+=r[u].green;
            channel_bias.blue+=r[u].blue;
            channel_bias.opacity+=r[u].opacity;
            if (image->colorspace == CMYKColorspace)
              channel_bias.index=(MagickRealType)
                GetPixelIndex(indexes+(r-p)+u);
          }
        channel_sum.red+=r[u].red;
        channel_sum.green+=r[u].green;
        channel_sum.blue+=r[u].blue;
        channel_sum.opacity+=r[u].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u);
      }
      r+=image->columns+width;
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        mean;

      mean=zero;
      r=p;
      /* Slide window right: drop the old trailing column (channel_bias),
         then add the leading/trailing columns for the new position */
      channel_sum.red-=channel_bias.red;
      channel_sum.green-=channel_bias.green;
      channel_sum.blue-=channel_bias.blue;
      channel_sum.opacity-=channel_bias.opacity;
      channel_sum.index-=channel_bias.index;
      channel_bias=zero;
      for (v=0; v < (ssize_t) height; v++)
      {
        channel_bias.red+=r[0].red;
        channel_bias.green+=r[0].green;
        channel_bias.blue+=r[0].blue;
        channel_bias.opacity+=r[0].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_bias.index=(MagickRealType)
            GetPixelIndex(indexes+x+(r-p)+0);
        channel_sum.red+=r[width-1].red;
        channel_sum.green+=r[width-1].green;
        channel_sum.blue+=r[width-1].blue;
        channel_sum.opacity+=r[width-1].opacity;
        if (image->colorspace == CMYKColorspace)
          channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+
            width-1);
        r+=image->columns+width;
      }
      /* Threshold against the neighborhood mean plus the caller's offset */
      mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset);
      mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset);
      mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset);
      mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset);
      if (image->colorspace == CMYKColorspace)
        mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset);
      SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ?
        0 : QuantumRange);
      SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ?
        0 : QuantumRange);
      SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ?
        0 : QuantumRange);
      SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ?
        0 : QuantumRange);
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex(
          threshold_indexes+x) <= mean.index) ? 0 : QuantumRange));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(threshold_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically performs image thresholding
%  dependent on which method you specify.
%
%  The format of the AutoThresholdImage method is:
%
%       MagickBooleanType AutoThresholdImage(Image *image,
%         const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
*/ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. */ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. */ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. 
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. 
*/ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const Image *image,const double *histogram) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; register ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ magick_unreferenced(image); start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
*/ x1=(double) max; y1=histogram[max]; x2=(double) end; if ((max-start) >= (end-max)) x2=(double) start; y2=0.0; a=y1-y2; b=x2-x1; c=(-1.0)*(a*x1+b*y1); inverse_ratio=1.0/sqrt(a*a+b*b+c*c); threshold=0; max_distance=0.0; if (x2 == (double) start) for (i=start; i < max; i++) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment > 0.0)) { threshold=i; max_distance=distance; } } else for (i=end; i > max; i--) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment < 0.0)) { threshold=i; max_distance=distance; } } return(100.0*threshold/MaxIntensity); } MagickExport MagickBooleanType AutoThresholdImage(Image *image, const AutoThresholdMethod method,ExceptionInfo *exception) { CacheView *image_view; char property[MagickPathExtent]; double gamma, *histogram, sum, threshold; MagickBooleanType status; register ssize_t i; ssize_t y; /* Form histogram. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { double intensity = GetPixelIntensity(image,p); histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++; p++; } } image_view=DestroyCacheView(image_view); /* Normalize histogram. 
*/ sum=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) sum+=histogram[i]; gamma=PerceptibleReciprocal(sum); for (i=0; i <= (ssize_t) MaxIntensity; i++) histogram[i]=gamma*histogram[i]; /* Discover threshold from histogram. */ switch (method) { case KapurThresholdMethod: { threshold=KapurThreshold(image,histogram,exception); break; } case OTSUThresholdMethod: default: { threshold=OTSUThreshold(image,histogram,exception); break; } case TriangleThresholdMethod: { threshold=TriangleThreshold(image,histogram); break; } } histogram=(double *) RelinquishMagickMemory(histogram); if (threshold < 0.0) status=MagickFalse; if (status == MagickFalse) return(MagickFalse); /* Threshold image. */ (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold); (void) SetImageProperty(image,"auto-threshold:threshold",property); return(BilevelImage(image,QuantumRange*threshold/100.0)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilevelImage() changes the value of individual pixels based on the % intensity of each pixel channel. The result is a high-contrast image. % % More precisely each channel value of the image is 'thresholded' so that if % it is equal to or less than the given value it is set to zero, while any % value greater than that give is set to it maximum or QuantumRange. % % This function is what is used to implement the "-threshold" operator for % the command line API. % % If the default channel setting is given the image is thresholded using just % the gray 'intensity' of the image, rather than the individual channels. 
%
%  The format of the BilevelImageChannel method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold)
%      MagickBooleanType BilevelImageChannel(Image *image,
%        const ChannelType channel,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o threshold: define the threshold values.
%
%  Aside: You can get the same results as operator using LevelImageChannels()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/

MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: threshold all default channels. */
  status=BilevelImageChannel(image,DefaultChannels,threshold);
  return(status);
}

MagickExport MagickBooleanType BilevelImageChannel(Image *image,
  const ChannelType channel,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    NOTE(review): this converts non-gray images to sRGB before thresholding
    -- confirm the condition polarity against upstream.
  */
  if (IsGrayColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      {
        /*
          Default channels: threshold on gray intensity and replicate the
          result into red, green, and blue.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 :
            QuantumRange);
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
      }
    else
      /*
        Explicit channel selection: threshold each requested channel
        independently.
      */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? 0 : QuantumRange);
            else
              SetPixelAlpha(q,(MagickRealType) GetPixelAlpha(q) <= threshold ?
                OpaqueOpacity : TransparentOpacity);
          }
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
            threshold ?
            0 : QuantumRange);
        q++;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType BlackThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: threshold all default channels. */
  status=BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A missing threshold string is treated as a no-op, not an error. */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse the geometry-style threshold string; unspecified channels default
    to the red (first) value.
  */
  GetMagickPixelPacket(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  /* A trailing '%' scales the values from percent to quantum range. */
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /* A non-gray threshold cannot be applied in a gray colorspace. */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Pixels strictly below the threshold are forced to black (0). */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() set each pixel whose
value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImageChannel method is:
%
%      MagickBooleanType ClampImage(Image *image)
%      MagickBooleanType ClampImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
*/

MagickExport MagickBooleanType ClampImage(Image *image)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: clamp all default channels. */
  status=ClampImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      /*
        PseudoClass image: clamping the colormap entries is sufficient; the
        pixel indexes themselves are unchanged.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel((MagickRealType) GetPixelIndex(
          indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  /*
    Release the owned members before freeing the map structure itself.
  */
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  /*
    RelinquishMagickMemory() returns NULL, so the caller receives a cleared
    pointer.
  */
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() look for a given threshold map name or alias in the
%  given XML file data, and return the allocated the map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o map_id: ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attribute,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(map);
  /*
    Search the <threshold> elements for one whose "map" or "alias" attribute
    matches the requested map_id.
  */
  for (threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    attribute=GetXMLTreeAttribute(threshold, "map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold, "alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /* A matching map must carry both a <description> and a <levels> child. */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    The map has been found -- allocate a Threshold Map to return
  */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  /* NULL the owned pointers so DestroyThresholdMap() is safe on any path. */
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
  */
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels, "divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Allocate threshold levels array.
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  {
    char
      *p;

    register ssize_t
      i;

    /*
      Parse levels into integer array.
    */
    for (i=0; i< (ssize_t) (map->width*map->height); i++)
    {
      map->levels[i]=(ssize_t) strtol(content,&p,10);
      /* strtol() leaves p == content when no digits were consumed. */
      if (p == content)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
            (double) map->levels[i],map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      content=p;
    }
    /* Any additional trailing value means the level count is wrong. */
    value=(double) strtol(content,&p,10);
    (void) value;
    if (p != content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
  }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() load and search one or more threshold map files for
%  a map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id: ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  ThresholdMap
    *map;

  /*
    Consult the built-in map list first.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
  /*
    Otherwise scan each configured thresholds file until a match is found.
  */
  options=GetConfigureOptions(ThresholdsFilename,exception);
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),map_id,exception);
    if (map != (ThresholdMap *) NULL)
      break;
  }
  options=DestroyConfigureOptions(options);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     L i s t T h r e s h o l d M a p F i l e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  XMLTreeInfo
    *thresholds,
    *threshold,
    *description;

  const char
    *map,
    *alias,
    *content;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  /* a NULL tree means the XML failed to parse; nothing is listed */
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  /*
    Walk each <threshold> element, printing its map id, optional alias and
    required <description> child.  Any structural error aborts the listing.
  */
  for( threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    map = GetXMLTreeAttribute(threshold, "map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias = GetXMLTreeAttribute(threshold, "alias");
    /* alias is optional, no if test needed */
    description=GetXMLTreeChild(threshold,"description");
    if ( description == (XMLTreeInfo *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if ( content == (char *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  L i s t   T h r e s h o l d   M a p s                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  /* list every configured threshold XML source; AND the results together */
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  O r d e r e d   D i t h e r   I m a g e                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() uses the ordered dithering technique of reducing color
%  images to monochrome using positional information to retain as much
%  information as possible.
% % WARNING: This function is deprecated, and is now just a call to % the more more powerful OrderedPosterizeImage(); function. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image) % MagickBooleanType OrderedDitherImageChannel(Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image) { MagickBooleanType status; status=OrderedDitherImageChannel(image,DefaultChannels,&image->exception); return(status); } MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image, const ChannelType channel,ExceptionInfo *exception) { MagickBooleanType status; /* Call the augumented function OrderedPosterizeImage() */ status=OrderedPosterizeImageChannel(image,channel,"o8x8",exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedPosterizeImage() will perform a ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedPosterizeImage method is: % % MagickBooleanType OrderedPosterizeImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % MagickBooleanType OrderedPosterizeImageChannel(Image *image, % const ChannelType channel,const char *threshold_map, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. 
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedPosterizeImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { MagickBooleanType status; status=OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map, exception); return(status); } MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image, const ChannelType channel,const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; LongPixelPacket levels; MagickBooleanType status; MagickOffsetType progress; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); { char token[MaxTextExtent]; register const char *p; p=(char *)threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MaxTextExtent-1)) break; token[p-threshold_map] = *p; p++; } token[p-threshold_map] = '\0'; map = GetThresholdMap(token, exception); if ( map == (ThresholdMap *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } } /* Set channel levels from extra comma separated arguments Default to 2, the single value given, or individual channel values */ #if 1 { /* parse directly as a comma separated list of integers */ char *p; p = strchr((char *) threshold_map,','); if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) ) levels.index = (unsigned int) strtoul(p, &p, 10); else levels.index = 2; levels.red = ((channel & RedChannel ) != 0) ? 
levels.index : 0; levels.green = ((channel & GreenChannel) != 0) ? levels.index : 0; levels.blue = ((channel & BlueChannel) != 0) ? levels.index : 0; levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0; levels.index = ((channel & IndexChannel) != 0 && (image->colorspace == CMYKColorspace)) ? levels.index : 0; /* if more than a single number, each channel has a separate value */ if ( p != (char *) NULL && *p == ',' ) { p=strchr((char *) threshold_map,','); p++; if ((channel & RedChannel) != 0) levels.red = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & GreenChannel) != 0) levels.green = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & BlueChannel) != 0) levels.blue = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace) levels.index=(unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & OpacityChannel) != 0) levels.opacity = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); } } #else /* Parse level values as a geometry */ /* This difficult! * How to map GeometryInfo structure elements into * LongPixelPacket structure elements, but according to channel? * Note the channels list may skip elements!!!! * EG -channel BA -ordered-dither map,2,3 * will need to map g.rho -> l.blue, and g.sigma -> l.opacity * A simpler way is needed, probably converting geometry to a temporary * array, then using channel to advance the index into ssize_t pixel packet. */ #endif #if 0 printf("DEBUG levels r=%u g=%u b=%u a=%u i=%u\n", levels.red, levels.green, levels.blue, levels.opacity, levels.index); #endif { /* Do the posterized ordered dithering of the image */ ssize_t d; /* d = number of psuedo-level divisions added between color levels */ d = map->divisor-1; /* reduce levels to levels - 1 */ levels.red = levels.red ? levels.red-1 : 0; levels.green = levels.green ? 
levels.green-1 : 0; levels.blue = levels.blue ? levels.blue-1 : 0; levels.opacity = levels.opacity ? levels.opacity-1 : 0; levels.index = levels.index ? levels.index-1 : 0; if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t threshold, t, l; /* Figure out the dither threshold for this pixel This must be a integer from 1 to map->divisor-1 */ threshold = map->levels[(x%map->width) +map->width*(y%map->height)]; /* Dither each channel in the image as appropriate Notes on the integer Math... total number of divisions = (levels-1)*(divisor-1)+1) t1 = this colors psuedo_level = q->red * total_divisions / (QuantumRange+1) l = posterization level 0..levels t = dither threshold level 0..divisor-1 NB: 0 only on last Each color_level is of size QuantumRange / (levels-1) NB: All input levels and divisor are already had 1 subtracted Opacity is inverted so 'off' represents transparent. 
*/ if (levels.red) { t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1)); l = t/d; t = t-l*d; SetPixelRed(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red))); } if (levels.green) { t = (ssize_t) (QuantumScale*GetPixelGreen(q)* (levels.green*d+1)); l = t/d; t = t-l*d; SetPixelGreen(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green))); } if (levels.blue) { t = (ssize_t) (QuantumScale*GetPixelBlue(q)* (levels.blue*d+1)); l = t/d; t = t-l*d; SetPixelBlue(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue))); } if (levels.opacity) { t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))* (levels.opacity*d+1)); l = t/d; t = t-l*d; SetPixelOpacity(q,ClampToQuantum((MagickRealType) ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/ levels.opacity))); } if (levels.index) { t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)* (levels.index*d+1)); l = t/d; t = t-l*d; SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+ (t>=threshold))*(MagickRealType) QuantumRange/levels.index))); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is closer) otherwise the pixel 
value remains
%  unchanged.
%
%  The format of the PerceptibleImageChannel method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
%      MagickBooleanType PerceptibleImageChannel(Image *image,
%        const ChannelType channel,const double epsilon)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/

/* Clamp a quantum away from zero: values with magnitude below epsilon are
   replaced by +/-epsilon, preserving sign.  NOTE(review): the sign test only
   matters for HDRI builds where Quantum is signed/floating -- confirm. */
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon)
{
  MagickBooleanType
    status;

  /* convenience wrapper over the channel variant with default channels */
  status=PerceptibleImageChannel(image,DefaultChannels,epsilon);
  return(status);
}

MagickExport MagickBooleanType PerceptibleImageChannel(Image *image,
  const ChannelType channel,const double epsilon)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Colormapped image: threshold the colormap entries directly.
        NOTE(review): this path ignores the channel argument -- all four
        components of every colormap entry are processed.
      */
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* only the channels selected by the caller are thresholded */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x),
          epsilon));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  R a n d o m   T h r e s h o l d   I m a g e                                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of
individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImageChannel(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%      MagickBooleanType RandomThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *thresholds,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o thresholds: a geometry string containing low,high thresholds.  If the
%      string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
%      is performed instead.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* convenience wrapper over the channel variant with default channels */
  status=RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception);
  return(status);
}

MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  /*
    Parse the low,high threshold range from the geometry string; a single
    value means low == high.  A trailing '%' scales both into quantum range.
  */
  GetMagickPixelPacket(image,&threshold);
  min_threshold=0.0;
  max_threshold=(MagickRealType) QuantumRange;
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  if (channel == CompositeChannels)
    {
      /*
        All channels requested: threshold the intensity into a two-entry
        colormap (bilevel) image.
      */
      if (AcquireImageColormap(image,2) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      random_info=AcquireRandomInfoThreadSet();
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          IndexPacket
            index;

          MagickRealType
            intensity;

          /*
            Intensities outside [min,max] use the nearest bound as the
            threshold; inside the band the threshold is random per pixel.
          */
          intensity=GetPixelIntensity(image,q);
          if (intensity < min_threshold)
            threshold.index=min_threshold;
          else
            if (intensity > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType)(QuantumRange*
                GetPseudoRandomValue(random_info[id]));
          index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,ThresholdImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      random_info=DestroyRandomInfoThreadSet(random_info);
      return(status);
    }
  /*
    Subset of channels requested: threshold each selected channel
    independently, in place, on a DirectClass image.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* pick a per-channel threshold (clamped or random, as above) ... */
      if ((channel & RedChannel) != 0)
        {
          if ((MagickRealType) GetPixelRed(q) < min_threshold)
            threshold.red=min_threshold;
          else
            if ((MagickRealType) GetPixelRed(q) > max_threshold)
              threshold.red=max_threshold;
            else
              threshold.red=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((MagickRealType) GetPixelGreen(q) < min_threshold)
            threshold.green=min_threshold;
          else
            if ((MagickRealType) GetPixelGreen(q) > max_threshold)
              threshold.green=max_threshold;
            else
              threshold.green=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((MagickRealType) GetPixelBlue(q) < min_threshold)
            threshold.blue=min_threshold;
          else
            if ((MagickRealType) GetPixelBlue(q) > max_threshold)
              threshold.blue=max_threshold;
            else
              threshold.blue=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if ((MagickRealType) GetPixelOpacity(q) < min_threshold)
            threshold.opacity=min_threshold;
          else
            if ((MagickRealType) GetPixelOpacity(q) > max_threshold)
              threshold.opacity=max_threshold;
            else
              threshold.opacity=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold)
            threshold.index=min_threshold;
          else
            if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      /* ... then binarize each selected channel against its threshold */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ? 0 :
          QuantumRange);
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ?
          0 : QuantumRange);
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ? 0 :
          QuantumRange);
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
          threshold.opacity ? 0 : QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
          threshold.index ? 0 : QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  W h i t e   T h r e s h o l d   I m a g e                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
%  the threshold into white while leaving all pixels at or below the threshold
%  unchanged.
%
%  The format of the WhiteThresholdImage method is:
%
%      MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType WhiteThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  /* convenience wrapper: default channels, image-owned exception */
  status=WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse per-channel thresholds from the geometry string
    (rho/sigma/xi/psi/chi -> red/green/blue/opacity/index); any channel not
    given defaults to the red (first) value.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  GetMagickPixelPacket(image,&threshold);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  /* a '%' qualifier scales all thresholds from percent into quantum range */
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /* a non-gray threshold on a gray image requires a color colorspace */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* pixels strictly above the threshold are forced to full intensity */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) > threshold.red))
        SetPixelRed(q,QuantumRange);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) > threshold.green))
        SetPixelGreen(q,QuantumRange);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) > threshold.blue))
        SetPixelBlue(q,QuantumRange);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) > threshold.opacity))
        SetPixelOpacity(q,QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index)
        SetPixelIndex(indexes+x,QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
convolution_3x3_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Forward declarations of ISA-specific variants compiled in separate
// translation units. They are only needed when this TU itself is built
// without the corresponding instruction set, so that the generic entry
// points below can dispatch to them at runtime.
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
void conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avx512vnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
void conv3x3s1_winograd43_pack8to1_int8_sse_avx512vnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
void conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avxvnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
void conv3x3s1_winograd43_pack8to1_int8_sse_avxvnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
void conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avx2(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
void conv3x3s1_winograd43_pack8to1_int8_sse_avx2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
void conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_xop(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
void conv3x3s1_winograd43_pack8to1_int8_sse_xop(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
#endif
#endif

// Transform a 3x3 int8 convolution kernel into the winograd43 (F(4x4,3x3))
// domain and interleave it for the pack8-to-pack1 int8 kernel.
//
// Parameters:
//   kernel             - raw weights, laid out as outch x inch x 9 signed bytes
//   kernel_tm_pack8to1 - output: transformed weights as 16-bit ints,
//                        layout "4b-8a-inch/8a-36-outch/4b" (see below)
//   inch / outch       - input / output channel counts
//   opt                - threading options (opt.num_threads)
static void conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
    // Runtime dispatch: if a faster ISA-specific build is available and the
    // CPU supports it, use that variant instead.
#if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__)
#if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avx512vnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avxvnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__
    if (ncnn::cpu_support_x86_avx2())
    {
        conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_avx2(kernel, kernel_tm_pack8to1, inch, outch, opt);
        return;
    }
#endif
#if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        conv3x3s1_winograd43_transform_kernel_pack8to1_int8_sse_xop(kernel, kernel_tm_pack8to1, inch, outch, opt);
        return;
    }
#endif
#endif

    // winograd43 transform kernel
    // Intermediate buffer: one 6x6 transformed tile (36 shorts) per
    // (outch, inch) pair.
    Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);

    // 6x3 kernel-transform matrix G for F(4,3); applied as G * g * G^T below.
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel: rows of the 3x3 kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g  (6x3 intermediate)
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T  (6x6 transformed kernel tile)
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 4b-8a-inch/8a-36-outch/4b
    kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4);

    // Pack output channels four at a time.
    int p = 0;
    for (; p + 3 < outch; p += 4)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);

        Mat g0 = kernel_tm_pack8to1.channel(p / 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);

            // Input channels are consumed in groups of 8 (pack8 input).
            for (int q = 0; q + 7 < inch; q += 8)
            {
#if __AVXVNNI__ || __AVX512VNNI__ || __XOP__
                // VNNI/XOP layout: pairs of adjacent input channels are kept
                // together so the dot-product instructions can consume them.
                for (int i = 0; i < 4; i++)
                {
                    const short* k00 = k0.row<const short>(q + i * 2);
                    const short* k10 = k1.row<const short>(q + i * 2);
                    const short* k20 = k2.row<const short>(q + i * 2);
                    const short* k30 = k3.row<const short>(q + i * 2);
                    const short* k01 = k0.row<const short>(q + i * 2 + 1);
                    const short* k11 = k1.row<const short>(q + i * 2 + 1);
                    const short* k21 = k2.row<const short>(q + i * 2 + 1);
                    const short* k31 = k3.row<const short>(q + i * 2 + 1);

                    g00[0] = k00[k];
                    g00[1] = k01[k];
                    g00[2] = k10[k];
                    g00[3] = k11[k];
                    g00[4] = k20[k];
                    g00[5] = k21[k];
                    g00[6] = k30[k];
                    g00[7] = k31[k];

                    g00 += 8;
                }
#else
                // Generic layout: one value per output channel, input channel
                // major.
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = k0.row<const short>(q + i)[k];
                    g00[1] = k1.row<const short>(q + i)[k];
                    g00[2] = k2.row<const short>(q + i)[k];
                    g00[3] = k3.row<const short>(q + i)[k];

                    g00 += 4;
                }
#endif
            }
        }
    }
    // Remaining output channels (outch % 4) are packed one at a time.
    for (; p < outch; p++)
    {
        const Mat k0 = kernel_tm.channel(p);

        Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] =
k0.row<const short>(q + i)[k]; g00 += 1; } } } } } static void conv3x3s1_winograd43_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt) { #if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__) #if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ if (ncnn::cpu_support_x86_avx512_vnni()) { conv3x3s1_winograd43_pack8to1_int8_sse_avx512vnni(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ if (ncnn::cpu_support_x86_avx_vnni()) { conv3x3s1_winograd43_pack8to1_int8_sse_avxvnni(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__ if (ncnn::cpu_support_x86_avx2()) { conv3x3s1_winograd43_pack8to1_int8_sse_avx2(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__ if (ncnn::cpu_support_x86_xop()) { conv3x3s1_winograd43_pack8to1_int8_sse_xop(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #endif int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; // size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, 
-1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { // TODO use _mm_cvtepi8_epi16 on sse4.1 __m128i _r00_01 = _mm_loadu_si128((const __m128i*)r0); __m128i _r02_03 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _r04_05 = _mm_loadu_si128((const __m128i*)(r0 + 32)); __m128i _extr0001 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r00_01); __m128i _extr0203 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r02_03); __m128i _extr0405 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r04_05); __m128i _r00 = _mm_unpacklo_epi8(_r00_01, _extr0001); __m128i _r01 = _mm_unpackhi_epi8(_r00_01, _extr0001); __m128i _r02 = _mm_unpacklo_epi8(_r02_03, _extr0203); __m128i _r03 = _mm_unpackhi_epi8(_r02_03, _extr0203); __m128i _r04 = _mm_unpacklo_epi8(_r04_05, _extr0405); __m128i _r05 = _mm_unpackhi_epi8(_r04_05, _extr0405); __m128i _v5 = _mm_set1_epi16(5); __m128i _tmp0m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r00, 2), _r04), _mm_mullo_epi16(_r02, _v5)); __m128i _tmp1m = _mm_sub_epi16(_mm_add_epi16(_r04, _r03), _mm_slli_epi16(_mm_add_epi16(_r01, _r02), 2)); __m128i _tmp2m = _mm_add_epi16(_mm_sub_epi16(_r04, _r03), _mm_slli_epi16(_mm_sub_epi16(_r01, _r02), 2)); __m128i _tmp3m = _mm_sub_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp4m = _mm_add_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp5m = 
_mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r01, 2), _r05), _mm_mullo_epi16(_r03, _v5)); _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m); _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m); _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m); _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m); _mm_storeu_si128((__m128i*)tmp[4][m], _tmp4m); _mm_storeu_si128((__m128i*)tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { __m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]); __m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]); __m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]); __m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]); __m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]); __m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]); __m128i _v5 = _mm_set1_epi16(5); __m128i _r0tm0 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp00, 2), _tmp04), _mm_mullo_epi16(_tmp02, _v5)); __m128i _r0tm1 = _mm_sub_epi16(_mm_add_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_add_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm2 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm3 = _mm_sub_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm4 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm5 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp01, 2), _tmp05), _mm_mullo_epi16(_tmp03, _v5)); _mm_storeu_si128((__m128i*)r0_tm_0, _r0tm0); _mm_storeu_si128((__m128i*)r0_tm_1, _r0tm1); _mm_storeu_si128((__m128i*)r0_tm_2, _r0tm2); _mm_storeu_si128((__m128i*)r0_tm_3, _r0tm3); _mm_storeu_si128((__m128i*)r0_tm_4, _r0tm4); 
_mm_storeu_si128((__m128i*)r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __AVX2__ if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { short* tmpptr = tm2.row<short>(i / 4); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256i _r0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _r1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); _mm256_storeu_si256((__m256i*)tmpptr, _r0); _mm256_storeu_si256((__m256i*)(tmpptr + 16), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 32; } } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2); #else short* tmpptr = tm2.row<short>(i / 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = 
_mm_loadu_si128((const __m128i*)r0); __m128i _r1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); _mm_storeu_si128((__m128i*)tmpptr, _r0); _mm_storeu_si128((__m128i*)(tmpptr + 8), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 16; } } for (; i < tiles; i++) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2); #else short* tmpptr = tm2.row<short>(i / 2 + i % 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)r0); _mm_storeu_si128((__m128i*)tmpptr, _r0); r0 += bottom_blob_tm.cstep * 8; tmpptr += 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); const Mat kernel0_tm = kernel_tm.channel(p / 4); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); __m256i _sum4_5 = _mm256_setzero_si256(); __m256i _sum6_7 = _mm256_setzero_si256(); for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val0_4567 = 
_mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val0_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val0_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val0_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val0_cdef); #else // 0 0 1 1 2 2 3 3 8 8 9 9 a a b b // 4 4 5 5 6 6 7 7 c c d d e e f f __m256i _val0_0123_89ab = _mm256_unpacklo_epi16(_val0, _val0); __m256i _val0_4567_cdef = _mm256_unpackhi_epi16(_val0, _val0); __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val0_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val0_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val0_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val0_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val0_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val0_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val0_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val0_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, 
_sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif __m256i _val1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w01, _val1_0123); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w01, _val1_89ab); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w23, _val1_4567); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w23, _val1_cdef); #else __m256i _val1_0123_89ab = _mm256_unpacklo_epi16(_val1, _val1); __m256i _val1_4567_cdef = _mm256_unpackhi_epi16(_val1, _val1); __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl04_05 = _mm256_mullo_epi16(_w01, _val1_0123); __m256i _sh04_05 = _mm256_mulhi_epi16(_w01, _val1_0123); __m256i _sl14_15 = _mm256_mullo_epi16(_w01, _val1_89ab); __m256i _sh14_15 = _mm256_mulhi_epi16(_w01, _val1_89ab); __m256i _sl06_07 = _mm256_mullo_epi16(_w23, _val1_4567); __m256i _sh06_07 = _mm256_mulhi_epi16(_w23, _val1_4567); __m256i _sl16_17 = _mm256_mullo_epi16(_w23, _val1_cdef); __m256i _sh16_17 = _mm256_mulhi_epi16(_w23, 
_val1_cdef); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl16_17, _sh16_17)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl16_17, _sh16_17)); #endif r0 += 32; k0 += 32; } __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); __m256i _sum4_6 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum5_7 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 3, 0, 1)); _sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7); int sum[16]; _mm256_storeu_si256((__m256i*)sum, _sum0_2); _mm256_storeu_si256((__m256i*)(sum + 8), _sum4_6); output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm[1] = sum[4]; output1_tm[1] = sum[5]; output2_tm[1] = sum[6]; output3_tm[1] = sum[7]; output0_tm[2] = sum[8]; output1_tm[2] = sum[9]; output2_tm[2] = sum[10]; output3_tm[2] = sum[11]; output0_tm[3] = sum[12]; output1_tm[3] = sum[13]; output2_tm[3] = sum[14]; output3_tm[3] = sum[15]; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = 
_mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { #if __AVX2__ // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val_cdef); #else __m256i _val_0123_89ab = _mm256_unpacklo_epi16(_val, _val); __m256i _val_4567_cdef = _mm256_unpackhi_epi16(_val, _val); __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = 
_mm256_mulhi_epi16(_w23, _val_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif #else // 0 1 2 3 4 5 6 7 __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val0_01 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val0_23 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val0_45 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val0_67 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(3, 3, 3, 3)); __m128i _val1_01 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val1_23 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val1_45 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val1_67 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val0_01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val0_23, _w1, _sum1); _sum2 = _mm_maddd_epi16(_val1_01, _w0, _sum2); _sum3 = _mm_maddd_epi16(_val1_23, _w1, _sum3); _sum0 = _mm_maddd_epi16(_val0_45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val0_67, 
_w3, _sum1); _sum2 = _mm_maddd_epi16(_val1_45, _w2, _sum2); _sum3 = _mm_maddd_epi16(_val1_67, _w3, _sum3); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val0_0123 = _mm_unpacklo_epi16(_val0, _val0); __m128i _val0_4567 = _mm_unpackhi_epi16(_val0, _val0); __m128i _val1_0123 = _mm_unpacklo_epi16(_val1, _val1); __m128i _val1_4567 = _mm_unpackhi_epi16(_val1, _val1); __m128i _val0_01 = _mm_unpacklo_epi32(_val0_0123, _val0_0123); __m128i _val0_23 = _mm_unpackhi_epi32(_val0_0123, _val0_0123); __m128i _val0_45 = _mm_unpacklo_epi32(_val0_4567, _val0_4567); __m128i _val0_67 = _mm_unpackhi_epi32(_val0_4567, _val0_4567); __m128i _val1_01 = _mm_unpacklo_epi32(_val1_0123, _val1_0123); __m128i _val1_23 = _mm_unpackhi_epi32(_val1_0123, _val1_0123); __m128i _val1_45 = _mm_unpacklo_epi32(_val1_4567, _val1_4567); __m128i _val1_67 = _mm_unpackhi_epi32(_val1_4567, _val1_4567); __m128i _sl00 = _mm_mullo_epi16(_w0, _val0_01); __m128i _sh00 = _mm_mulhi_epi16(_w0, _val0_01); __m128i _sl10 = _mm_mullo_epi16(_w0, _val1_01); __m128i _sh10 = _mm_mulhi_epi16(_w0, _val1_01); __m128i _sl01 = _mm_mullo_epi16(_w1, _val0_23); __m128i _sh01 = _mm_mulhi_epi16(_w1, _val0_23); __m128i _sl11 = _mm_mullo_epi16(_w1, _val1_23); __m128i _sh11 = _mm_mulhi_epi16(_w1, _val1_23); __m128i _sl02 = _mm_mullo_epi16(_w2, _val0_45); __m128i _sh02 = _mm_mulhi_epi16(_w2, _val0_45); __m128i _sl12 = _mm_mullo_epi16(_w2, _val1_45); __m128i _sh12 = _mm_mulhi_epi16(_w2, _val1_45); __m128i _sl03 = _mm_mullo_epi16(_w3, _val0_67); __m128i _sh03 = _mm_mulhi_epi16(_w3, _val0_67); __m128i _sl13 = _mm_mullo_epi16(_w3, _val1_67); __m128i _sh13 = _mm_mulhi_epi16(_w3, _val1_67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01)); _sum1 = _mm_add_epi32(_sum1, 
_mm_unpackhi_epi16(_sl01, _sh01)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl11, _sh11)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl11, _sh11)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl02, _sh02)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl02, _sh02)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl12, _sh12)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl12, _sh12)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl03, _sh03)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl03, _sh03)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl13, _sh13)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl13, _sh13)); #endif #endif r0 += 16; k0 += 32; } #if __AVX2__ __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); int sum[8]; _mm256_storeu_si256((__m256i*)sum, _sum0_2); #else _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); int sum[8]; _mm_storeu_si128((__m128i*)sum, _sum0); _mm_storeu_si128((__m128i*)(sum + 4), _sum2); #endif output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm[1] = sum[4]; output1_tm[1] = sum[5]; output2_tm[1] = sum[6]; output3_tm[1] = sum[7]; output0_tm += 2; output1_tm += 2; output2_tm += 2; output3_tm += 2; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 __m128i _val = _mm_loadu_si128((const __m128i*)r0); #if __AVX2__ __m256i _w01 = 
_mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ // 0 1 0 1 x x x x // 0 1 0 1 0 1 0 1 __m128i _val_01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val_23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val_45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val_67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); __m256i _val_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_01), _val_23, 1); __m256i _val_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_45), _val_67, 1); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m256i _val_0123 = _mm256_castsi128_si256(_mm_unpacklo_epi16(_val, _val)); __m256i _val_4567 = _mm256_castsi128_si256(_mm_unpackhi_epi16(_val, _val)); _val_0123 = _mm256_permutevar8x32_epi32(_val_0123, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); _val_4567 = _mm256_permutevar8x32_epi32(_val_4567, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); #endif #else __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val23 
= _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val23, _w1, _sum1); _sum0 = _mm_maddd_epi16(_val45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val67, _w3, _sum1); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val_0123 = _mm_unpacklo_epi16(_val, _val); __m128i _val_4567 = _mm_unpackhi_epi16(_val, _val); __m128i _val01 = _mm_unpacklo_epi32(_val_0123, _val_0123); __m128i _val23 = _mm_unpackhi_epi32(_val_0123, _val_0123); __m128i _val45 = _mm_unpacklo_epi32(_val_4567, _val_4567); __m128i _val67 = _mm_unpackhi_epi32(_val_4567, _val_4567); __m128i _sl0 = _mm_mullo_epi16(_w0, _val01); __m128i _sh0 = _mm_mulhi_epi16(_w0, _val01); __m128i _sl1 = _mm_mullo_epi16(_w1, _val23); __m128i _sh1 = _mm_mulhi_epi16(_w1, _val23); __m128i _sl2 = _mm_mullo_epi16(_w2, _val45); __m128i _sh2 = _mm_mulhi_epi16(_w2, _val45); __m128i _sl3 = _mm_mullo_epi16(_w3, _val67); __m128i _sh3 = _mm_mulhi_epi16(_w3, _val67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl1, _sh1)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl3, _sh3)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl3, _sh3)); #endif #endif r0 += 8; k0 += 32; } #if __AVX2__ __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0); __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1); #endif _sum0 = _mm_add_epi32(_sum0, _sum1); int sum[4]; _mm_storeu_si128((__m128i*)sum, _sum0); output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm += 1; output1_tm += 1; output2_tm 
+= 1; output3_tm += 1; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); __m128i _sum4 = _mm_setzero_si128(); __m128i _sum5 = _mm_setzero_si128(); __m128i _sum6 = _mm_setzero_si128(); __m128i _sum7 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _val2 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _val3 = _mm_loadu_si128((const __m128i*)(r0 + 24)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val0, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl1 = _mm_mullo_epi16(_val1, _w0); __m128i _sh1 = _mm_mulhi_epi16(_val1, _w0); __m128i _sl2 = _mm_mullo_epi16(_val2, _w0); __m128i _sh2 = _mm_mulhi_epi16(_val2, _w0); __m128i _sl3 = _mm_mullo_epi16(_val3, _w0); __m128i _sh3 = _mm_mulhi_epi16(_val3, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1)); _sum4 = _mm_add_epi32(_sum4, _mm_unpacklo_epi16(_sl2, _sh2)); _sum5 = _mm_add_epi32(_sum5, _mm_unpackhi_epi16(_sl2, _sh2)); _sum6 = _mm_add_epi32(_sum6, _mm_unpacklo_epi16(_sl3, _sh3)); _sum7 = _mm_add_epi32(_sum7, _mm_unpackhi_epi16(_sl3, _sh3)); k0 += 8; r0 += 32; } _sum0 = 
_mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); _sum4 = _mm_add_epi32(_sum4, _sum5); _sum6 = _mm_add_epi32(_sum6, _sum7); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm[1] = _mm_reduce_add_epi32(_sum2); output0_tm[2] = _mm_reduce_add_epi32(_sum4); output0_tm[3] = _mm_reduce_add_epi32(_sum6); output0_tm += 4; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val0, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl1 = _mm_mullo_epi16(_val1, _w0); __m128i _sh1 = _mm_mulhi_epi16(_val1, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1)); k0 += 8; r0 += 16; } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm[1] = _mm_reduce_add_epi32(_sum2); output0_tm += 2; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val = _mm_loadu_si128((const __m128i*)r0); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = 
_mm_mullo_epi16(_val, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); k0 += 8; r0 += 8; } _sum0 = _mm_add_epi32(_sum0, _sum1); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1; const int* output0_tm_1 = output0_tm_0 + tiles * 1; const int* output0_tm_2 = output0_tm_0 + tiles * 2; const int* output0_tm_3 = output0_tm_0 + tiles * 3; const int* output0_tm_4 = output0_tm_0 + tiles * 4; const int* output0_tm_5 = output0_tm_0 + tiles * 5; int* output0 = out0.row<int>(i * 4) + j * 4; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 // TODO sse optimize for (int m = 0; m < 5; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int 
tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b; tmp[1][m] = tmp13a + tmp13b * 2; tmp[2][m] = tmp02a + tmp02b * 4; tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 5; m < 6; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4; tmp[1][m] = (tmp13a + tmp13b * 2) * 4; tmp[2][m] = (tmp02a + tmp02b * 4) * 4; tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 0; m < 4; m++) { const int* tmp0 = tmp[m]; int tmp02a = tmp0[1] + tmp0[2]; int tmp13a = tmp0[1] - tmp0[2]; int tmp02b = tmp0[3] + tmp0[4]; int tmp13b = tmp0[3] - tmp0[4]; output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576; output0[1] = (tmp13a + tmp13b * 2) / 576; output0[2] = (tmp02a + tmp02b * 4) / 576; output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) { for (t4=max(max(ceild(t1-12,16),ceild(4*t2-Nz-19,32)),ceild(4*t3-Ny-19,32));t4<=min(min(min(floord(4*Nt+Nx-9,32),floord(2*t1+Nx-3,32)),floord(4*t2+Nx-9,32)),floord(4*t3+Nx-9,32));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(32*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(32*t4,4*t5+4); ubv=min(32*t4+31,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ 
t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
sum.c
#include <stdio.h>

#define NX 102400

/* Demonstrates an OpenMP data race: the shared accumulator `sum` is
 * updated concurrently without a reduction or atomic, so the parallel
 * result is expected to differ from the exact arithmetic-series value.
 * The race is intentional (teaching example) and must not be "fixed". */
int main(void)
{
    /* `static` keeps the ~800 KB array out of the stack frame, where it
     * could overflow the default stack limit on some systems. */
    static long vecA[NX];
    long sum, sumex;
    int i;

    /* Initialization of the vectors: vecA[i] = i + 1 */
    for (i = 0; i < NX; i++) {
        vecA[i] = (long) i + 1;
    }

    /* Exact reference value from the closed form n*(n+1)/2. */
    sumex = (long) NX * (NX + 1) / ((long) 2);
    printf("Arithmetic sum formula (exact): %ld\n", sumex);

    /* BUG FIX: was `sum = 0.0;` -- a double literal assigned to a long
     * (same value, wrong type). Also removed the unused local `psum`. */
    sum = 0;
    /* Version with data race */
#pragma omp parallel for default(shared) private(i)
    for (i = 0; i < NX; i++) {
        sum += vecA[i];
    }
    printf("Sum with data race: %ld\n", sum);

    return 0;
}
ordering_op-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file ordering_op-inl.h * \brief Function definition of ordering operators */ #ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_ #define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_ #include <mxnet/operator_util.h> #include <dmlc/optional.h> #include <mshadow/tensor.h> #include <algorithm> #include <vector> #include <string> #include <type_traits> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "./sort_op.h" #include "./indexing_op.h" #include "../../api/operator/op_utils.h" namespace mshadow { template<typename xpu, int src_dim, typename DType, int dst_dim> inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src, Shape<dst_dim> target_shape) { CHECK_EQ(src.CheckContiguous(), true); return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_); } }; namespace mxnet { namespace op { // These enums are only visible within this header namespace topk_enum { enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth}; } // topk_enum struct TopKParam : public dmlc::Parameter<TopKParam> { dmlc::optional<int> axis; int k; int ret_typ; bool is_ascend; int dtype; 
DMLC_DECLARE_PARAMETER(TopKParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose the top k indices." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(k).set_default(1) .describe("Number of top elements to select," " should be always smaller than or equal to the element number in the given axis." " A global sort is performed if set k < 1."); DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices) .add_enum("value", topk_enum::kReturnValue) .add_enum("indices", topk_enum::kReturnIndices) .add_enum("mask", topk_enum::kReturnMask) .add_enum("both", topk_enum::kReturnBoth) .describe("The return type.\n" " \"value\" means to return the top k values," " \"indices\" means to return the indices of the top k values," " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values." " \"both\" means to return a list of both values and indices of top k elements."); DMLC_DECLARE_FIELD(is_ascend).set_default(false) .describe("Whether to choose k largest or k smallest elements." " Top K largest elements will be chosen if set to false."); DMLC_DECLARE_FIELD(dtype) // TODO(srivrohi): remove support for real data type in mxnet-2.0 .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("int64", mshadow::kInt64) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices when ret_typ is \"indices\" or \"both\". 
" "An error will be raised if the selected data type cannot precisely represent the " "indices."); } std::string ReturnType2String(int ret_typ) { switch (ret_typ) { case topk_enum::kReturnValue: return "value"; case topk_enum::kReturnIndices: return "indices"; case topk_enum::kReturnMask: return "mask"; case topk_enum::kReturnBoth: return "both"; default: LOG(FATAL) << "Unknown return type enum " << ret_typ; } LOG(FATAL) << "should not reach here "; return ""; } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, k_s, ret_typ_s, is_ascend_s, dtype_s; axis_s << axis; k_s << k; dtype_s << dtype; ret_typ_s << ret_typ; is_ascend_s << is_ascend; (*dict)["axis"] = axis_s.str(); (*dict)["k"] = k_s.str(); (*dict)["ret_typ"] = ReturnType2String(ret_typ); (*dict)["is_ascend"] = is_ascend_s.str(); (*dict)["dtype"] = MXNetTypeWithBool2String(dtype); } }; struct SortParam : public dmlc::Parameter<SortParam> { dmlc::optional<int> axis; bool is_ascend; DMLC_DECLARE_PARAMETER(SortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose sort the input tensor." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, is_ascend_s; axis_s << axis; is_ascend_s << is_ascend; (*dict)["axis"] = axis_s.str(); (*dict)["is_ascend_s"] = is_ascend_s.str(); } }; struct ArgSortParam : public dmlc::Parameter<ArgSortParam> { dmlc::optional<int> axis; bool is_ascend; int dtype; DMLC_DECLARE_PARAMETER(ArgSortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to sort the input tensor." " If not given, the flattened array is used. 
Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); DMLC_DECLARE_FIELD(dtype) // TODO(srivrohi): remove support for real data type in mxnet-2.0 .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("int64", mshadow::kInt64) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or" " \"both\". An error will be raised if the selected data type cannot precisely " "represent the indices."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, is_ascend_s, dtype_s; axis_s << axis; is_ascend_s << is_ascend; dtype_s << dtype; (*dict)["axis"] = axis_s.str(); (*dict)["is_ascend_s"] = is_ascend_s.str(); (*dict)["dtype"] = MXNetTypeWithBool2String(dtype); } }; template<typename IDXType = index_t> inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape, size_t *batch_size, IDXType *element_num, int *axis, IDXType *k, bool *do_transpose, bool *is_ascend) { *do_transpose = false; *k = param.k; *is_ascend = param.is_ascend; // get batch_size, axis and element_num if (!static_cast<bool>(param.axis)) { // No axis given *axis = 0; *batch_size = 1; *element_num = src_shape.Size(); } else { *axis = param.axis.value(); if (*axis < 0) { *axis += src_shape.ndim(); } CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim())) << "Invalid axis! 
axis should be between 0 and " << src_shape.ndim() << ", found axis=" << *axis; if (src_shape[*axis] != 0) { *batch_size = src_shape.Size() / src_shape[*axis]; } *element_num = src_shape[*axis]; if (*axis != src_shape.ndim() - 1) { *do_transpose = true; } } // get k if (param.k <= 0) { *k = *element_num; } // get target_shape if (!static_cast<bool>(param.axis)) { if (param.ret_typ != topk_enum::kReturnMask) { *target_shape = mshadow::Shape1(*k); } else { *target_shape = src_shape; } } else { *target_shape = src_shape; if (param.ret_typ != topk_enum::kReturnMask) { (*target_shape)[*axis] = *k; } } CHECK(*k >= 0 && *k <= *element_num) << "k must be smaller than " << *element_num << ", get k = " << *k; } using namespace mshadow; struct fill_ind_to_one { template<typename DType, typename IDXType> MSHADOW_XINLINE static void Map(index_t i, const IDXType* indices, DType* out) { out[indices[i]] = static_cast<DType>(1); } }; struct fill_ind { template<typename DType, typename IDXType> MSHADOW_XINLINE static void Map(index_t i, const IDXType* indices, const DType* val, int req, DType* out) { KERNEL_ASSIGN(out[indices[i]], req, val[i]); } }; template<typename DType, typename IDXType> MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat, const Tensor<cpu, 1, IDXType>& ind, const Tensor<cpu, 1, char>& work, IDXType K, IDXType N, bool is_ascend, Stream<cpu> *s) { // Use full sort when K is relatively large. const bool full_sort(K*8 > N); // Batch size. const size_t M(work.size(0)/(sizeof(DType)*N)); const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(M); ++i) { // Tensor `work` stores the flattened source data, while `dat` stores the sorted result. 
DType *vals = reinterpret_cast<DType*>(work.dptr_); DType *sorted_vals = dat.dptr_+i*N; IDXType *indices = ind.dptr_+i*N; if (is_ascend) { if (full_sort) { std::sort(indices, indices+N, [&](const IDXType& i1, const IDXType& i2){ return vals[i1] < vals[i2]; }); } else { std::partial_sort(indices, indices+K, indices+N, [&](const IDXType& i1, const IDXType& i2){ return vals[i1] < vals[i2]; }); } } else { if (full_sort) { std::sort(indices, indices+N, [&](const IDXType& i1, const IDXType& i2){ return vals[i1] > vals[i2]; }); } else { std::partial_sort(indices, indices+K, indices+N, [&](const IDXType& i1, const IDXType& i2){ return vals[i1] > vals[i2]; }); } } for (IDXType j = 0; j < K; ++j) { sorted_vals[j] = vals[indices[j]]; } } } #ifdef __CUDACC__ template<typename DType, typename IDXType> MSHADOW_XINLINE bool TopKCompare(DType val1, IDXType ind1, DType val2, IDXType ind2, bool is_ascend) { // Negative indices denote undefined values which are considered arbitrary small resp. large. return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2))); } template<typename DType, typename IDXType> MSHADOW_XINLINE void MergeTopK(IDXType K, DType *val1, IDXType *ind1, DType *val2, IDXType *ind2, bool is_ascend) { // In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals // [0,..,i1], [0,..i2] of the two lists that will be part of the merged list. IDXType i1(K-1), i2(K-1); for (IDXType i = 0; i < K; ++i) { if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) { --i2; } else { --i1; } } // Now merge the lists from back to front. 
for (IDXType i = K; i--;) { if (i2 < 0 || i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend)) { val1[i] = val1[i1]; ind1[i] = ind1[i1]; --i1; } else { val1[i] = val2[i2]; ind1[i] = ind2[i2]; --i2; } } } template<typename DType, typename IDXType> __global__ void PartialSortSmallK(IDXType K, IDXType N, DType *val, IDXType *ind, bool is_ascend) { // Buffer for pairwise reduction. extern __shared__ __align__(sizeof(IDXType)) unsigned char temp_smem[]; IDXType *buff = reinterpret_cast<IDXType *>(temp_smem); // Start of buffer sections associated with this thread. const IDXType offset(threadIdx.x*K); IDXType *ind_buff = reinterpret_cast<IDXType*>(&buff[offset]); DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset; // Initialize top-K values for this thread. for (IDXType i = 0; i < K; ++i) { ind_buff[i] = -1; } // Range of values this thread cares about. Each thread block processes // a different batch item (i.e. a different set of ind/val where we // have to select the top-K elements). All threads within the same // block work on the same batch item. const IDXType first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N); // Select top-K from this range and store it sorted in the buffer. // We assume a small K, so linear insertion is o.k. for (IDXType i = first; i < last; i += blockDim.x) { DType cur_val(val[i]); IDXType cur_ind(ind[i]); for (IDXType j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) { if (j+1 < K) { val_buff[j+1] = val_buff[j]; ind_buff[j+1] = ind_buff[j]; } val_buff[j] = cur_val; ind_buff[j] = cur_ind; } } // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not // necessary a power of two, therefore the additional checks for last_s. 
for (IDXType s = (blockDim.x+1)/2, last_s = blockDim.x; last_s > 1; last_s = s, s = (s+1)/2) { __syncthreads(); if (threadIdx.x < s && threadIdx.x+s < last_s) { MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend); } } // Final updates on master thread. if (threadIdx.x == 0) { for (IDXType i = 0; i < K; ++i) { ind[blockIdx.x*N+i] = ind_buff[i]; val[blockIdx.x*N+i] = val_buff[i]; } } } template<typename DType, typename IDXType> MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat, const Tensor<gpu, 1, IDXType>& ind, const Tensor<gpu, 1, char>& work, IDXType K, IDXType N, bool is_ascend, Stream<gpu> *s) { // Use full sort for all but very small K for which we // can do a partial sort entirely within shared memory. const bool full_sort(K > 5); // Batch size. const size_t M(dat.size(0)/N); if (full_sort) { // Divide workspace into two parts. The first one is needed to store batch ids. size_t alignment = std::max(sizeof(DType), sizeof(IDXType)); size_t id_size = PadBytes(sizeof(IDXType) * ind.size(0), alignment); Tensor<gpu, 1, IDXType> batch_id(reinterpret_cast<IDXType*>(work.dptr_), Shape1(ind.size(0)), s); Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s); mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work); if (M > 1) { // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort. batch_id = ind / N; mxnet::op::SortByKey(batch_id, dat, true, &sort_work); batch_id = ind / N; mxnet::op::SortByKey(batch_id, ind, true, &sort_work); } } else { const IDXType nthreads(mshadow::cuda::kBaseThreadNum); PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(IDXType)+sizeof(DType)), mshadow::Stream<gpu>::GetStream(s)>>> (K, N, dat.dptr_, ind.dptr_, is_ascend); } } #endif /*! 
 * \brief Implementation of the TopK operation
 *
 *
 * \param ctx the running context
 * \param resource temporary resource handler
 * \param req the request types for each output blob
 * \param src the Source blob
 * \param ret the destination blobs
 * \param param the topk parameters
 * \tparam xpu the device type.
 * \tparam DType type of the output value/mask.
 * \tparam IDType type of the output indices.
 * \tparam IDXType integer type used for the internal sorting indices.
 */
template<typename xpu, typename DType, typename IDType, typename IDXType>
void TopKImpl(const RunContext &ctx,
              const Resource &resource,
              const std::vector<OpReqType>& req,
              const TBlob& src,
              const std::vector<TBlob>& ret,
              const TopKParam& param) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 0. If input shape is 0-shape, directly return
  if (src.Size() == 0) return;
  // 1. Parse and initialize information
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, IDXType> indices, sel_indices;
  size_t batch_size = 0;
  IDXType element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  IDXType k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(IDXType));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k,
                 &do_transpose, &is_ascend);
  // Guard against IDXType overflow when indexing the flattened input.
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDXType>())
    << "'index_t' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<IDXType>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  // Temp space needed by the full sorts.
  size_t temp_size = std::max(
      mxnet::op::SortByKeyWorkspaceSize<IDXType, DType, xpu>(src.Size()),
      mxnet::op::SortByKeyWorkspaceSize<DType, IDXType, xpu>(src.Size()));
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<IDXType, IDXType, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(IDXType) * src.Size(), alignment);
  // Temp space for cpu sorts.
  temp_size = std::max(temp_size, sizeof(DType) * src.Size());
  // Layout of the single workspace allocation:
  //   [sorted_dat | indices | (sel_indices when kReturnMask) | temp buffer]
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                          + PadBytes(sizeof(IDXType) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(IDXType) * batch_size * k, alignment);
  }
  workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
  char* workspace_curr_ptr = workspace.dptr_;
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, IDXType>(reinterpret_cast<IDXType*>(workspace_curr_ptr),
                                    Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(IDXType) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, IDXType>(reinterpret_cast<IDXType*>(workspace_curr_ptr),
                                          Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(IDXType) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }
  if (std::is_same<xpu, cpu>::value) {
    // On CPU the sort works directly on the (flattened) source data, so only
    // a flat view / transposed copy is prepared here.
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    // On GPU the data is first copied into sorted_dat and sorted in place.
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }
  // Fill `indices` with 0, 1, 2, ... (flattened positions before sorting).
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1,
                                           IDXType{0}, IDXType{1}, kWriteTo,
                                           reinterpret_cast<IDXType*>(indices.dptr_));
  CHECK_EQ(indices.CheckContiguous(), true);
  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    sel_indices = reshape(slice<1>(
        inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
        Shape1(batch_size * k));
    if (do_transpose) {
      // Map indices from the transposed layout back to the source layout.
      mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices,
                                      Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      // mod by element_num turns flattened indices into per-batch indices.
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices =
          ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  } else {
    // kReturnValue / kReturnBoth: write values to ret[0] and indices to ret[1].
    if (do_transpose) {
      Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s);
      Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_value, req[0], transpose(
          slice<2>(inplace_reshape(sorted_dat,
                                   Shape3(ret_value.shape_[0],
                                          ret_value.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, DType> ret_value =
          ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s);
      Tensor<xpu, 2, IDType> ret_indices =
          ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s);
      ASSIGN_DISPATCH(ret_value, req[0],
          slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k));
      ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>(
          inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num)));
    }
  }
}

// Computes the size (in bytes) of the single workspace allocation TopKImpl
// needs for the given input/parameters; also reports the size of the internal
// temp buffer through *temp_size_ptr.
template<typename xpu, typename DType>
size_t TopKWorkspaceSize(const TBlob& src,
                         const TopKParam& param,
                         size_t *temp_size_ptr) {
  using namespace mshadow;
  using namespace mshadow::expr;
  size_t batch_size = 0;
  size_t temp_size;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(index_t));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k,
                 &do_transpose, &is_ascend);
  // Temp space needed by the full sorts.
  temp_size = std::max(
      mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()),
      mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size()));
  temp_size = std::max(temp_size,
      mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size()));
  // Additional temp space for gpu full sorts for batch ids.
  temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment);
  // Temp space for cpu sorts.
temp_size = std::max(temp_size, sizeof(DType) * src.Size());
  *temp_size_ptr = temp_size;
  // Overall layout mirrors TopKImpl:
  //   [sorted_dat | indices | (sel_indices when kReturnMask) | temp buffer]
  size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment)
                          + PadBytes(sizeof(index_t) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment);
  }
  return workspace_size;
}

// Variant of TopKImpl that works on a caller-provided workspace
// (sized via TopKWorkspaceSize) instead of requesting one from a Resource.
// Steps and buffer layout are otherwise identical to TopKImpl.
template<typename xpu, typename DType, typename IDType>
void TopKImplwithWorkspace(const RunContext &ctx,
                           const std::vector<OpReqType>& req,
                           const TBlob& src,
                           const std::vector<TBlob>& ret,
                           const TopKParam& param,
                           char* workspace_curr_ptr,
                           const size_t &temp_size,
                           Stream<xpu>* s) {
  using namespace mshadow;
  using namespace mshadow::expr;
  // 0. If input shape is 0-shape, directly return
  if (src.Size() == 0) return;
  // 1. Parse and initialize information
  Tensor<xpu, 1, char> workspace;
  Tensor<xpu, 1, char> temp_workspace;
  Tensor<xpu, 1, DType> sorted_dat;
  Tensor<xpu, 1, index_t> indices, sel_indices;
  size_t batch_size = 0;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  size_t alignment = std::max(sizeof(DType), sizeof(index_t));
  mxnet::TShape target_shape;
  ParseTopKParam(src.shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k,
                 &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>())
    << "'index_t' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<index_t>() << " elements";
  Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s);
  sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                     Shape1(src.Size()), s);  // contain sorted dat
  workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment);
  indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
                                    Shape1(src.Size()), s);  // indices in the original matrix
  workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment);
  if (param.ret_typ == topk_enum::kReturnMask) {
    sel_indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr),
                                          Shape1(batch_size * k), s);
    workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment);
    CHECK_EQ(sel_indices.CheckContiguous(), true);
  }
  if (std::is_same<xpu, cpu>::value) {
    // CPU path: sort directly against a flat view (or transposed copy) of src.
    Tensor<xpu, 1, DType> flattened_data;
    if (do_transpose) {
      flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr),
                                             Shape1(src.Size()), s);
      workspace_curr_ptr += sizeof(DType) * src.Size();
      flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
      CHECK_EQ(flattened_data.CheckContiguous(), true);
    } else {
      flattened_data = src.FlatTo1D<xpu, DType>(s);
    }
    // `temp_workspace` stores the flattened data
    temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_),
                                          Shape1(sizeof(DType)*src.Size()), s);
    CHECK_EQ(temp_workspace.CheckContiguous(), true);
  } else {
    // GPU path: copy into sorted_dat and sort in place.
    if (do_transpose) {
      sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
    } else {
      sorted_dat = reshape(dat, Shape1(src.Size()));
    }
    CHECK_EQ(sorted_dat.CheckContiguous(), true);
    temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s);  // temp space
    workspace_curr_ptr += temp_size;
  }
  // Fill `indices` with the iota sequence 0, 1, 2, ...
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1,
                                           index_t{0}, index_t{1}, kWriteTo, indices.dptr_);
  CHECK_EQ(indices.CheckContiguous(), true);
  // 2. Perform inplace batch sort.
  // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
  // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
  // `temp_workspace` is used to store the flattend source data for CPU device, and it's used as
  // a temporal buffer for GPU device.
  TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
  // 3. Assign results to the ret blob
  // When returning indices, only update(modulo) required elements instead of full elements
  // to avoid redundant calculation.
  // Cast `ret_indices` from int to real_t could introduce conversion error when the element_num
  // is large enough.
  if (param.ret_typ == topk_enum::kReturnMask) {
    Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s);
    ret_mask = scalar<DType>(0);
    sel_indices = reshape(slice<1>(
        inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k),
        Shape1(batch_size * k));
    if (do_transpose) {
      mxnet::TShape src_shape = src.shape_.FlatTo3D(axis);
      CHECK_EQ(sel_indices.CheckContiguous(), true);
      sel_indices = transpose_indices(sel_indices,
                                      Shape3(src_shape[0], src_shape[2], src_shape[1]),
                                      Shape3(0, 2, 1));
    }
    if (req[0] == kNullOp) {
      return;
    } else if (req[0] == kWriteTo) {
      mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k,
                                                     sel_indices.dptr_, ret_mask.dptr_);
    } else {
      LOG(FATAL) << "req=" << req[0] << " is not supported yet.";
    }
  } else if (param.ret_typ == topk_enum::kReturnIndices) {
    if (do_transpose) {
      Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s);
      ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose(
          slice<2>(inplace_reshape(indices,
                                   Shape3(ret_indices.shape_[0],
                                          ret_indices.shape_[2],
                                          element_num)),
                   0, k),
          Shape3(0, 2, 1)), element_num)));
    } else {
      Tensor<xpu, 2, IDType> ret_indices = ret[0].get_with_shape<xpu, 2,
});
    });
  } else {
    // Values-only / mask outputs keep index_t as the index carrier.
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      if (inputs[0].Size() >= INT_MAX) {
        TopKImpl<xpu, DType, index_t, index_t>(ctx.run_ctx, ctx.requested[0], req,
                                               inputs[0], outputs, param);
      } else {
        TopKImpl<xpu, DType, index_t, int32_t>(ctx.run_ctx, ctx.requested[0], req,
                                               inputs[0], outputs, param);
      }
    });
  }
}

// Full sort along an axis, implemented as top-k with k == 0
// (ParseTopKParam treats that as "keep everything").
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    // int32 indices are cheaper; fall back to index_t for huge inputs.
    if (inputs[0].Size() >= INT_MAX) {
      TopKImpl<xpu, DType, index_t, index_t>(ctx.run_ctx, ctx.requested[0], req,
                                             inputs[0], outputs, topk_param);
    } else {
      TopKImpl<xpu, DType, index_t, int32_t>(ctx.run_ctx, ctx.requested[0], req,
                                             inputs[0], outputs, topk_param);
    }
  });
}

// argsort: full sort returning only the indices, again via top-k with k == 0.
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<TBlob>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<TBlob>& outputs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.dtype = param.dtype;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(param.dtype, IDType, {
      if (inputs[0].Size() >= INT_MAX) {
        TopKImpl<xpu, DType, IDType, index_t>(ctx.run_ctx, ctx.requested[0], req,
                                              inputs[0], outputs, topk_param);
      } else {
        TopKImpl<xpu, DType, IDType, int32_t>(ctx.run_ctx, ctx.requested[0], req,
                                              inputs[0], outputs, topk_param);
      }
    });
  });
}

// Backward pass of top-k: scatters the output gradient (inputs[0]) back to the
// positions recorded in the forward indices (inputs[2]).
template<typename xpu, typename DType, typename IDType, typename IDXType>
void TopKBackwardImpl(const OpContext &ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs,
                      const TopKParam& param) {
  CHECK_NE(req[0], kWriteInplace);
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
  CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
  size_t batch_size = 0;
  IDXType element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  IDXType k = 0;
  mxnet::TShape target_shape;
  ParseTopKParam(outputs[0].shape_, param,
                 &target_shape, &batch_size, &element_num, &axis, &k,
                 &do_transpose, &is_ascend);
  CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDXType>())
    << "'IDType' does not have a sufficient precision to represent "
    << "the indices of the input array. The total element_num is " << element_num
    << ", but the selected index_t can only represent "
    << mxnet::common::MaxIntegerValue<IDXType>() << " elements";
  // Workspace: [sel_indices (batch_size*k) | batch_shift (batch_size)]
  Tensor<xpu, 1, IDXType> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, IDXType>(Shape1(batch_size * k + batch_size), s);
  Tensor<xpu, 1, IDXType> sel_indices =
      Tensor<xpu, 1, IDXType>(workspace.dptr_, Shape1(batch_size * k), s);
  Tensor<xpu, 1, IDXType> batch_shift =
      Tensor<xpu, 1, IDXType>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
  Tensor<xpu, 2, DType> out_grad =
      inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s);
  Tensor<xpu, 2, DType> in_grad =
      outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s);
  // batch_shift[b] = b * element_num: the flattened offset of batch b.
  mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, IDXType{0}, element_num,
                                           kWriteTo, batch_shift.dptr_);
  if (do_transpose) {
    Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s);
    mxnet::TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
    sel_indices = reshape(transpose(
        broadcast_to(inplace_reshape(batch_shift,
                                     Shape3(src_shape[0], src_shape[2], 1)),
topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) {
    return static_cast<uint32_t>(1);
  } else {
    return static_cast<uint32_t>(2);
  }
}

// Number of outputs exposed to the user: both values and indices only for
// kReturnBoth, a single output otherwise.
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  if (param.ret_typ == topk_enum::kReturnBoth) {
    return static_cast<uint32_t>(2);
  } else {
    return static_cast<uint32_t>(1);
  }
}

// Type inference for top-k: the index output dtype depends on ret_typ and
// (for user-requested indices) on param.dtype.
inline bool TopKType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK(out_size == 1 || out_size == 2);
  // out_attr[0] -> stores value
  // out_attr[1] -> stores indices
  if (out_size > 1) {
    if (param.ret_typ == topk_enum::kReturnValue) {
#if MXNET_USE_INT64_TENSOR_SIZE == 1
      CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64))
#else
      CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
#endif
        << "Failed to set the type of ret_indices.";
    } else {
      CHECK(type_assign(&(*out_attrs)[1], param.dtype))
        << "Failed to set the type of ret_indices.";
    }
  }
  if (param.ret_typ == topk_enum::kReturnIndices) {
    CHECK(type_assign(&(*out_attrs)[0], param.dtype))
      << "Failed to set the type of ret_indices.";
  } else {
    // Values share the input dtype; propagate in both directions.
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
    return out_attrs->at(0) != -1;
  }
  return true;
}

// Shared shape inference: every output of top-k has the shape produced by
// ParseTopKParam (input shape with the sorted axis clipped to k).
inline bool TopKShapeImpl(const TopKParam& param,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    CHECK_EQ(out_attrs->size(), 1U);
  } else {
    CHECK_EQ(out_attrs->size(), 2U);
  }
  mxnet::TShape& in_shape = (*in_attrs)[0];
  size_t batch_size = 0;
  index_t element_num = 0;  // number of batches + the size of each batch
  int axis = 0;
  bool do_transpose = false;
  bool is_ascend = false;
  index_t k = 0;
  mxnet::TShape target_shape;
  ParseTopKParam(in_shape, param,
                 &target_shape, &batch_size, &element_num, &axis, &k,
                 &do_transpose, &is_ascend);
  if (param.ret_typ == topk_enum::kReturnIndices ||
      param.ret_typ == topk_enum::kReturnMask) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
  } else {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
    SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
  }
  return true;
}

inline bool TopKShape(const nnvm::NodeAttrs& attrs,
                      mxnet::ShapeVector *in_attrs,
                      mxnet::ShapeVector *out_attrs) {
  const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
  return TopKShapeImpl(param, in_attrs, out_attrs);
}

// Type inference for sort: output 1 (indices) is a fixed integer type selected
// at build time; output 0 (values) must agree with the input dtype.
inline bool SortType(const nnvm::NodeAttrs& attrs,
                     std::vector<int> *in_attrs,
                     std::vector<int> *out_attrs) {
  int data_type = -1;
  size_t in_size = in_attrs->size();
  size_t out_size = out_attrs->size();
  CHECK_EQ(in_size, 1);
  CHECK_EQ(out_size, 2);
#if MXNET_USE_INT64_TENSOR_SIZE == 1
  CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64))
#else
  CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32))
#endif
    << "Failed to set the type of ret_indices";
  CHECK(type_assign(&data_type, (*in_attrs)[0]))
    << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0];
  CHECK(type_assign(&data_type, (*out_attrs)[0]))
    << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0];
  CHECK(type_assign(&(*in_attrs)[0], data_type))
    << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0];
  CHECK(type_assign(&(*out_attrs)[0], data_type))
    << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0];
  if (data_type == -1) return false;
  return true;
}

// Sort shapes reuse TopKShapeImpl with k == 0 (full sort, values returned).
inline bool SortShape(const nnvm::NodeAttrs& attrs,
                      mxnet::ShapeVector *in_attrs,
                      mxnet::ShapeVector *out_attrs) {
  const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnValue;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}

// argsort indices take the user-requested dtype.
inline bool ArgSortType(const nnvm::NodeAttrs& attrs,
                        std::vector<int> *in_attrs,
                        std::vector<int> *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  CHECK(type_assign(&(*out_attrs)[0], param.dtype))
    << "Failed to set the type of ret_indices.";
  return true;
}

// argsort shapes reuse TopKShapeImpl with k == 0 (full sort, indices returned).
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
  TopKParam topk_param;
  topk_param.axis = param.axis;
  topk_param.is_ascend = param.is_ascend;
  topk_param.k = 0;
  topk_param.ret_typ = topk_enum::kReturnIndices;
  return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}

}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
vednnActivationBackward.c
#include <stdio.h> #include <stdint.h> #include "vednnActivationBackward.h" #ifdef VEDNN_USE_OPENMP #include <stdint.h> #include <omp.h> extern int __vednn_omp_num_threads ; #endif static inline vednnError_t vednnActivationBackward_wrapper( vednnActivationBackward_t pFunc, const void *pDataGradOut, const void *pDataIn, void *pDataGradIn, const uint64_t nElements ) { #ifdef VEDNN_USE_OPENMP if ( __vednn_omp_num_threads == 1 ) { return pFunc(pDataGradOut, pDataIn, pDataGradIn, nElements) ; } else { vednnError_t rc = VEDNN_SUCCESS ; #pragma omp parallel reduction(|:rc) { int64_t nthreads = omp_get_num_threads() ; int64_t threadid = omp_get_thread_num() ; int64_t eachNElement = nElements / nthreads ; int64_t remain = nElements % nthreads ; int64_t elementBegin = eachNElement * threadid + ( threadid < remain ? threadid : remain ) ; int64_t myElement = eachNElement + ( threadid < remain ? 1 : 0 ) ; if( myElement == 0 ) { rc |= VEDNN_SUCCESS ; } else { float* _pDataGradOut = ((float *)pDataGradOut) + elementBegin ; float* _pDataIn = ((float *)pDataIn) + elementBegin ; float* _pDataGradIn = ((float *)pDataGradIn) + elementBegin ; rc |= pFunc((void*)_pDataGradOut, (void*)_pDataIn, (void*) _pDataGradIn, myElement) ; } } return rc ; } #else return pFunc(pDataGradOut, pDataIn, pDataGradIn, nElements) ; #endif } /* ----------------------------------------------------------------------- */ vednnError_t vednnActivationBackward( const vednnActivationMode_t mode, const void *pDataGradOut, const void *pDataIn, void *pDataGradIn, const uint64_t nElements ) { switch(mode) { case VEDNN_ACTIVATION_RELU : return vednnActivationBackward_wrapper( vednnActivationBackward_Relu, pDataGradOut, pDataIn, pDataGradIn, nElements ) ; default : fprintf(stderr, "VEDNN Error : vednnActivationBackward : Invalid Parameter !!\n") ; return VEDNN_ERROR_INVALID_PARAM ; } }
vla_crash.c
// RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp -x c -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} int a; // CHECK-LABEL: foo void foo() { int(*b)[a]; int *(**c)[a]; // CHECK: [[B:%.+]] = alloca i32*, // CHECK: [[C:%.+]] = alloca i32***, // CHECK: @__kmpc_global_thread_num // CHECK: call void @__kmpc_serialized_parallel // CHECK: call void [[OUTLINED:@[^(]+]](i32* %{{[^,]+}}, i32* %{{[^,]+}}, i64 %{{[^,]+}}, i32** [[B]], i64 %{{[^,]+}}, i32**** [[C]]) // CHECK: call void @__kmpc_end_serialized_parallel // CHECK: ret void #pragma omp parallel if (0) b[0][0] = c[0][a][0][a]; } // CHECK: define internal void [[OUTLINED]](i32* {{[^,]+}}, i32* {{[^,]+}}, i64 {{[^,]+}}, i32** {{[^,]+}}, i64 {{[^,]+}}, i32**** {{[^,]+}}) // CHECK-LABEL: bar void bar(int n, int *a) { // CHECK: [[N:%.+]] = alloca i32, // CHECK: [[A:%.+]] = alloca i32*, // CHECK: [[P:%.+]] = alloca i32*, // CHECK: @__kmpc_global_thread_num // CHECK: [[BC:%.+]] = bitcast i32** [[A]] to i32* // CHECK: store i32* [[BC]], i32** [[P]], // CHECK: call void @__kmpc_serialized_parallel // CHECK: call void [[OUTLINED:@[^(]+]](i32* %{{[^,]+}}, i32* %{{[^,]+}}, i64 %{{[^,]+}}, i32** [[P]], i32** [[A]]) // CHECK: call void @__kmpc_end_serialized_parallel // CHECK: ret void // expected-warning@+1 {{incompatible pointer types initializing 'int (*)[n]' with an expression of type 'int **'}} int(*p)[n] = &a; #pragma omp parallel if(0) // expected-warning@+1 {{comparison of distinct pointer types ('int (*)[n]' and 'int **')}} if (p == &a) { } } // CHECK: define internal void [[OUTLINED]](i32* {{[^,]+}}, i32* {{[^,]+}}, i64 {{[^,]+}}, i32** {{[^,]+}}, i32** {{[^,]+}})
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(8*t2-Nz-508,512)),ceild(8*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(4*t1+Nx+5,512)),floord(8*t2+Nx+4,512)),floord(8*t3+Nx+4,512)),floord(8*t1-8*t2+Nz+Nx+3,512));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),512*t4+510),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp 
parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
BlockHandlerAVX.h
// // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE.md file in the project root for full licence information. // #pragma once #include "BlockMultiplierPlatform.h" #include <immintrin.h> #include <emmintrin.h> #include <assert.h> #include <cstdint> #define FOR_CNTK #ifdef FOR_CNTK #include "CommonMatrix.h" #endif namespace Microsoft { namespace MSR { namespace CNTK { class MATH_API BlockHandlerAVX { private: //USE SSE for the blocks of 8, borrowed from BlockHandlerSSE FORCEINLINE static void kernelsse8x4(__m128i xmmRow0, __m128i xmmRow1, __m128i xmmRow2, __m128i xmmRow3, short* B, __m128i* return1, __m128i* return2, __m128i* return3, __m128i* return4); FORCEINLINE static void kernelavx16x4(__m256i xmmRow0B0a, __m256i xmmRow1B0a, __m256i xmmRow2B0a, __m256i xmmRow3B0a, short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4); FORCEINLINE static void kernelavx32x4( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow3B0a, __m256i xmmRow3B0b, short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4); FORCEINLINE static void kernelavx64x4( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d, __m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow1B0c, __m256i xmmRow1B0d, __m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow2B0c, __m256i xmmRow2B0d, __m256i xmmRow3B0a, __m256i xmmRow3B0b, __m256i xmmRow3B0c, __m256i xmmRow3B0d, short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4); FORCEINLINE static void kernelavx128x4( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d, __m256i xmmRow0B0e, __m256i xmmRow0B0f, __m256i xmmRow0B0g, __m256i xmmRow0B0h, __m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow1B0c, __m256i xmmRow1B0d, __m256i xmmRow1B0e, __m256i xmmRow1B0f, __m256i xmmRow1B0g, __m256i xmmRow1B0h, 
__m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow2B0c, __m256i xmmRow2B0d, __m256i xmmRow2B0e, __m256i xmmRow2B0f, __m256i xmmRow2B0g, __m256i xmmRow2B0h, __m256i xmmRow3B0a, __m256i xmmRow3B0b, __m256i xmmRow3B0c, __m256i xmmRow3B0d, __m256i xmmRow3B0e, __m256i xmmRow3B0f, __m256i xmmRow3B0g, __m256i xmmRow3B0h, short* B, __m256i* return1, __m256i* return2, __m256i* return3, __m256i* return4); FORCEINLINE static void kernelsse8x1(__m128i xmmRow0, short* B, __m128i* return1); FORCEINLINE static void kernelavx16x1(__m256i xmmRow0B0a, short* B, __m256i* return1 ); FORCEINLINE static void kernelavx32x1( __m256i xmmRow0B0a, __m256i xmmRow0B0b, short* B, __m256i* return1); FORCEINLINE static void kernelavx64x1( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d, short* B, __m256i* return1) ; FORCEINLINE static void kernelavx128x1( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d, __m256i xmmRow0B0e, __m256i xmmRow0B0f, __m256i xmmRow0B0g, __m256i xmmRow0B0h, short* B, __m256i* return1); //TODO: Should these be refactored somewhere else? Any BlockHandler will need access to these functions. //Separate class with static functions? Maybe move the Block rewriting functions as well as these to a new //static class. 
static int RowToColOffsetRewrittenB(int col, int kOffset, int blockSize, int origCols); static int RowToColOffsetRewrittenA(int row, int kOffset, int blockSize, int rowsPerBlock, int origCols); static void DumpM256(__m256i dumpMe); public: typedef __m256i VectorT; typedef int16_t ScalarAT; typedef int16_t ScalarBT; typedef int32_t ScalarCT; FORCEINLINE static void HandleBlock8x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m128i* resultStorage); FORCEINLINE static void HandleBlock32x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage); FORCEINLINE static void HandleBlock64x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage); FORCEINLINE static void HandleBlock128x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage, VectorT* subtractMe); FORCEINLINE static void HandleBlock8x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m128i* resultStorage); FORCEINLINE static void HandleBlock16x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage); FORCEINLINE static void HandleBlock64x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage); FORCEINLINE static void HandleBlock128x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage, VectorT* subtractMe); FORCEINLINE static void HandleBlock16x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage); //FORCEINLINE static void HandleBlock128x4(int currBlock, int startRow, int m, int k, int n, short* newA, short* B, FORCEINLINE static void HandleBlock32x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage); static VectorT* PrepareExtraB(const ScalarBT* 
/*prepareMe*/, int /*k*/, int /*n*/) { return nullptr; } static void FreePreparedB(VectorT* freeMe) { freeMe; assert(nullptr == freeMe); } }; #define LOADAVX2_128x4 \ __m256i r0b0a2 = _mm256_load_si256((__m256i*)currA2); \ __m256i r0b0b2 = _mm256_load_si256((__m256i*)(currA2 + 16)); \ __m256i r0b0c2 = _mm256_load_si256((__m256i*)(currA2 + 32)); \ __m256i r0b0d2 = _mm256_load_si256((__m256i*)(currA2 + 48)); \ __m256i r0b0e2 = _mm256_load_si256((__m256i*)(currA2 + 64)); \ __m256i r0b0f2 = _mm256_load_si256((__m256i*)(currA2 + 80)); \ __m256i r0b0g2 = _mm256_load_si256((__m256i*)(currA2 + 96)); \ __m256i r0b0h2 = _mm256_load_si256((__m256i*)(currA2 + 112));\ \ __m256i r1b0a2 = _mm256_load_si256((__m256i*)(currA2 + 128));\ __m256i r1b0b2 = _mm256_load_si256((__m256i*)(currA2 + 144));\ __m256i r1b0c2 = _mm256_load_si256((__m256i*)(currA2 + 160));\ __m256i r1b0d2 = _mm256_load_si256((__m256i*)(currA2 + 176));\ __m256i r1b0e2 = _mm256_load_si256((__m256i*)(currA2 + 192));\ __m256i r1b0f2 = _mm256_load_si256((__m256i*)(currA2 + 208));\ __m256i r1b0g2 = _mm256_load_si256((__m256i*)(currA2 + 224));\ __m256i r1b0h2 = _mm256_load_si256((__m256i*)(currA2 + 240));\ \ __m256i r2b0a2 = _mm256_load_si256((__m256i*)(currA2 + 256));\ __m256i r2b0b2 = _mm256_load_si256((__m256i*)(currA2 + 272));\ __m256i r2b0c2 = _mm256_load_si256((__m256i*)(currA2 + 288));\ __m256i r2b0d2 = _mm256_load_si256((__m256i*)(currA2 + 304));\ __m256i r2b0e2 = _mm256_load_si256((__m256i*)(currA2 + 320));\ __m256i r2b0f2 = _mm256_load_si256((__m256i*)(currA2 + 336));\ __m256i r2b0g2 = _mm256_load_si256((__m256i*)(currA2 + 352));\ __m256i r2b0h2 = _mm256_load_si256((__m256i*)(currA2 + 368));\ \ __m256i r3b0a2 = _mm256_load_si256((__m256i*)(currA2 + 384));\ __m256i r3b0b2 = _mm256_load_si256((__m256i*)(currA2 + 400));\ __m256i r3b0c2 = _mm256_load_si256((__m256i*)(currA2 + 416));\ __m256i r3b0d2 = _mm256_load_si256((__m256i*)(currA2 + 432));\ __m256i r3b0e2 = _mm256_load_si256((__m256i*)(currA2 + 448));\ 
__m256i r3b0f2 = _mm256_load_si256((__m256i*)(currA2 + 464));\ __m256i r3b0g2 = _mm256_load_si256((__m256i*)(currA2 + 480));\ __m256i r3b0h2 = _mm256_load_si256((__m256i*)(currA2 + 496));\ #define LOADAVX2_128x1 \ __m256i r0b0a2 = _mm256_load_si256((__m256i*)currA2); \ __m256i r0b0b2 = _mm256_load_si256((__m256i*)(currA2 + 16)); \ __m256i r0b0c2 = _mm256_load_si256((__m256i*)(currA2 + 32)); \ __m256i r0b0d2 = _mm256_load_si256((__m256i*)(currA2 + 48)); \ __m256i r0b0e2 = _mm256_load_si256((__m256i*)(currA2 + 64)); \ __m256i r0b0f2 = _mm256_load_si256((__m256i*)(currA2 + 80)); \ __m256i r0b0g2 = _mm256_load_si256((__m256i*)(currA2 + 96)); \ __m256i r0b0h2 = _mm256_load_si256((__m256i*)(currA2 + 112)); #define LOADAVX_128x1 \ __m256i r0b0a = _mm256_load_si256((__m256i*)currA); \ __m256i r0b0b = _mm256_load_si256((__m256i*)(currA + 16)); \ __m256i r0b0c = _mm256_load_si256((__m256i*)(currA + 32)); \ __m256i r0b0d = _mm256_load_si256((__m256i*)(currA + 48)); \ __m256i r0b0e = _mm256_load_si256((__m256i*)(currA + 64)); \ __m256i r0b0f = _mm256_load_si256((__m256i*)(currA + 80)); \ __m256i r0b0g = _mm256_load_si256((__m256i*)(currA + 96)); \ __m256i r0b0h = _mm256_load_si256((__m256i*)(currA + 112)); #define LOADAVX_128x4 \ __m256i r0b0a = _mm256_load_si256((__m256i*)currA); \ __m256i r0b0b = _mm256_load_si256((__m256i*)(currA + 16)); \ __m256i r0b0c = _mm256_load_si256((__m256i*)(currA + 32)); \ __m256i r0b0d = _mm256_load_si256((__m256i*)(currA + 48)); \ __m256i r0b0e = _mm256_load_si256((__m256i*)(currA + 64)); \ __m256i r0b0f = _mm256_load_si256((__m256i*)(currA + 80)); \ __m256i r0b0g = _mm256_load_si256((__m256i*)(currA + 96)); \ __m256i r0b0h = _mm256_load_si256((__m256i*)(currA + 112));\ \ __m256i r1b0a = _mm256_load_si256((__m256i*)(currA + 128));\ __m256i r1b0b = _mm256_load_si256((__m256i*)(currA + 144));\ __m256i r1b0c = _mm256_load_si256((__m256i*)(currA + 160));\ __m256i r1b0d = _mm256_load_si256((__m256i*)(currA + 176));\ __m256i r1b0e = 
_mm256_load_si256((__m256i*)(currA + 192));\ __m256i r1b0f = _mm256_load_si256((__m256i*)(currA + 208));\ __m256i r1b0g = _mm256_load_si256((__m256i*)(currA + 224));\ __m256i r1b0h = _mm256_load_si256((__m256i*)(currA + 240));\ \ __m256i r2b0a = _mm256_load_si256((__m256i*)(currA + 256));\ __m256i r2b0b = _mm256_load_si256((__m256i*)(currA + 272));\ __m256i r2b0c = _mm256_load_si256((__m256i*)(currA + 288));\ __m256i r2b0d = _mm256_load_si256((__m256i*)(currA + 304));\ __m256i r2b0e = _mm256_load_si256((__m256i*)(currA + 320));\ __m256i r2b0f = _mm256_load_si256((__m256i*)(currA + 336));\ __m256i r2b0g = _mm256_load_si256((__m256i*)(currA + 352));\ __m256i r2b0h = _mm256_load_si256((__m256i*)(currA + 368));\ \ __m256i r3b0a = _mm256_load_si256((__m256i*)(currA + 384));\ __m256i r3b0b = _mm256_load_si256((__m256i*)(currA + 400));\ __m256i r3b0c = _mm256_load_si256((__m256i*)(currA + 416));\ __m256i r3b0d = _mm256_load_si256((__m256i*)(currA + 432));\ __m256i r3b0e = _mm256_load_si256((__m256i*)(currA + 448));\ __m256i r3b0f = _mm256_load_si256((__m256i*)(currA + 464));\ __m256i r3b0g = _mm256_load_si256((__m256i*)(currA + 480));\ __m256i r3b0h = _mm256_load_si256((__m256i*)(currA + 496));\ #define LOADAVX_64x4 \ __m256i r0b0a = _mm256_load_si256((__m256i*)currA);\ __m256i r0b0b = _mm256_load_si256((__m256i*)currA + 1);\ __m256i r0b0c = _mm256_load_si256((__m256i*)currA + 2);\ __m256i r0b0d = _mm256_load_si256((__m256i*)currA + 3);\ \ __m256i r1b0a = _mm256_load_si256((__m256i*)currA + 4);\ __m256i r1b0b = _mm256_load_si256((__m256i*)currA + 5);\ __m256i r1b0c = _mm256_load_si256((__m256i*)currA + 6);\ __m256i r1b0d = _mm256_load_si256((__m256i*)currA + 7);\ \ __m256i r2b0a = _mm256_load_si256((__m256i*)currA + 8);\ __m256i r2b0b = _mm256_load_si256((__m256i*)currA + 9);\ __m256i r2b0c = _mm256_load_si256((__m256i*)currA + 10);\ __m256i r2b0d = _mm256_load_si256((__m256i*)currA + 11);\ \ __m256i r3b0a = _mm256_load_si256((__m256i*)currA + 12);\ __m256i r3b0b = 
_mm256_load_si256((__m256i*)currA + 13);\ __m256i r3b0c = _mm256_load_si256((__m256i*)currA + 14);\ __m256i r3b0d = _mm256_load_si256((__m256i*)currA + 15); #define LOADAVX_64x1 \ __m256i r0b0a = _mm256_load_si256((__m256i*)currA); \ __m256i r0b0b = _mm256_load_si256((__m256i*)currA + 1); \ __m256i r0b0c = _mm256_load_si256((__m256i*)currA + 2); \ __m256i r0b0d = _mm256_load_si256((__m256i*)currA + 3); #define LOADAVX_32x4 \ __m256i r0b0a = _mm256_load_si256((__m256i*)currA); \ __m256i r0b0b = _mm256_load_si256((__m256i*)currA + 1); \ \ __m256i r1b0a = _mm256_load_si256((__m256i*)currA + 2);\ __m256i r1b0b = _mm256_load_si256((__m256i*)currA + 3);\ \ __m256i r2b0a = _mm256_load_si256((__m256i*)currA + 4);\ __m256i r2b0b = _mm256_load_si256((__m256i*)currA + 5);\ \ __m256i r3b0a = _mm256_load_si256((__m256i*)currA + 6);\ __m256i r3b0b = _mm256_load_si256((__m256i*)currA + 7);\ #define LOADAVX_32x1 \ __m256i r0b0a = _mm256_load_si256((__m256i*)currA); \ __m256i r0b0b = _mm256_load_si256((__m256i*)currA + 1); #define LOADAVX_16x4 \ __m256i r0b0a = _mm256_load_si256((__m256i*)currA);\ __m256i r1b0a = _mm256_load_si256((__m256i*)currA + 1);\ __m256i r2b0a = _mm256_load_si256((__m256i*)currA + 2);\ __m256i r3b0a = _mm256_load_si256((__m256i*)currA + 3);\ #define LOADAVX_16x1 \ __m256i r0b0a = _mm256_load_si256((__m256i*)currA); #define LOAD_8x4 \ __m128i r0b0a = _mm_load_si128((__m128i*)currA);\ __m128i r1b0a = _mm_load_si128((__m128i*)currA + 1);\ __m128i r2b0a = _mm_load_si128((__m128i*)currA + 2);\ __m128i r3b0a = _mm_load_si128((__m128i*)currA + 3);\ #define LOAD_8x1 \ __m128i r0b0a = _mm_load_si128((__m128i*)currA); FORCEINLINE void BlockHandlerAVX::HandleBlock8x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m128i* resultStorage) { blockCnt; //warning 4100 int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 8, 4, k); short* currA = &newA[aOffset]; LOAD_8x4; for (int c = 0; c < n; ++c) { short* currB = 
&B[RowToColOffsetRewrittenB(c, currBlock, 8, n)]; __m128i accum1 = _mm_set_epi32(0, 0, 0, 0); __m128i accum2 = _mm_set_epi32(0, 0, 0, 0); __m128i accum3 = _mm_set_epi32(0, 0, 0, 0); __m128i accum4 = _mm_set_epi32(0, 0, 0, 0); kernelsse8x4(r0b0a, r1b0a, r2b0a, r3b0a, currB, &accum1, &accum2, &accum3, &accum4); resultStorage[RowColToOffset(0, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1); resultStorage[RowColToOffset(1, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(1, c, n)], accum2); resultStorage[RowColToOffset(2, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(2, c, n)], accum3); resultStorage[RowColToOffset(3, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(3, c, n)], accum4); } } FORCEINLINE void BlockHandlerAVX::HandleBlock8x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int /*blockCnt*/, __m128i* resultStorage) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 8, 4, k); short* currA = &newA[aOffset]; LOAD_8x1; for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 8, n)]; __m128i accum1 = _mm_set_epi32(0, 0, 0, 0); kernelsse8x1(r0b0a, currB, &accum1); resultStorage[RowColToOffset(0, c, n)] = _mm_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1); } } FORCEINLINE void BlockHandlerAVX::HandleBlock16x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int /*blockCnt*/, __m256i* resultStorage) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 16, 4, k); short* currA = &newA[aOffset]; LOADAVX_16x4; //#pragma omp parallel for for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 16, n)]; //The gain comes when we have all the row values loaded up //together and we multiply them all times each column, saving m_rowsPerBlock column //loads. 
__m256i accum1 = _mm256_set1_epi16(0); __m256i accum2 = _mm256_set1_epi16(0); __m256i accum3 = _mm256_set1_epi16(0); __m256i accum4 = _mm256_set1_epi16(0); kernelavx16x4(r0b0a, r1b0a, r2b0a, r3b0a, currB, &accum1, &accum2, &accum3, &accum4); resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1); resultStorage[RowColToOffset(1, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(1, c, n)], accum2); resultStorage[RowColToOffset(2, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(2, c, n)], accum3); resultStorage[RowColToOffset(3, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(3, c, n)], accum4); } } FORCEINLINE void BlockHandlerAVX::HandleBlock16x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int /*blockCnt*/, __m256i* resultStorage) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 16, 1, k); short* currA = &newA[aOffset]; LOADAVX_16x1; //#pragma omp parallel for for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 16, n)]; //The gain comes when we have all the row values loaded up //together and we multiply them all times each column, saving m_rowsPerBlock column //loads. __m256i accum1 = _mm256_set1_epi16(0); kernelavx16x1(r0b0a, currB, &accum1); resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1); } } FORCEINLINE void BlockHandlerAVX::HandleBlock32x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int /*blockCnt*/, __m256i* resultStorage) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 32, 4, k); short* currA = &newA[aOffset]; LOADAVX_32x4; //#pragma omp parallel for for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 32, n)]; //The gain comes when we have all the row values loaded up //together and we multiply them all times each column, saving m_rowsPerBlock column //loads. 
__m256i accum1 = _mm256_set1_epi16(0); __m256i accum2 = _mm256_set1_epi16(0); __m256i accum3 = _mm256_set1_epi16(0); __m256i accum4 = _mm256_set1_epi16(0); kernelavx32x4( r0b0a, r0b0b, r1b0a, r1b0b, r2b0a, r2b0b, r3b0a, r3b0b, currB, &accum1, &accum2, &accum3, &accum4); resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], accum1); resultStorage[RowColToOffset(1, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(1, c, n)], accum2); resultStorage[RowColToOffset(2, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(2, c, n)], accum3); resultStorage[RowColToOffset(3, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(3, c, n)], accum4); } } FORCEINLINE void BlockHandlerAVX::HandleBlock32x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int /*blockCnt*/, __m256i* resultStorage) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 32, 1, k); short* currA = &newA[aOffset]; LOADAVX_32x1; //#pragma omp parallel for for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 32, n)]; __m256i accum1 = _mm256_set1_epi16(0); kernelavx32x1( r0b0a, r0b0b, currB, &accum1); resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], accum1); } } FORCEINLINE void BlockHandlerAVX::HandleBlock64x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int /*blockCnt*/, __m256i* resultStorage) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 64, 4, k); short* currA = &newA[aOffset]; LOADAVX_64x4; //#pragma omp parallel for for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 64, n)]; //The gain comes when we have all the row values loaded up //together and we multiply them all times each column, saving m_rowsPerBlock column //loads. 
__m256i accum1 = _mm256_set1_epi16(0); __m256i accum2 = _mm256_set1_epi16(0); __m256i accum3 = _mm256_set1_epi16(0); __m256i accum4 = _mm256_set1_epi16(0); kernelavx64x4( r0b0a, r0b0b, r0b0c, r0b0d, r1b0a, r1b0b, r1b0c, r1b0d, r2b0a, r2b0b, r2b0c, r2b0d, r3b0a, r3b0b, r3b0c, r3b0d, currB, &accum1, &accum2, &accum3, &accum4); resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], accum1); resultStorage[RowColToOffset(1, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(1, c, n)], accum2); resultStorage[RowColToOffset(2, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(2, c, n)], accum3); resultStorage[RowColToOffset(3, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(3, c, n)], accum4); } } FORCEINLINE void BlockHandlerAVX::HandleBlock64x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int /*blockCnt*/, __m256i* resultStorage) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 64, 4, k); short* currA = &newA[aOffset]; LOADAVX_64x1; //#pragma omp parallel for for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 64, n)]; //The gain comes when we have all the row values loaded up //together and we multiply them all times each column, saving m_rowsPerBlock column //loads. 
__m256i accum1 = _mm256_set1_epi16(0); kernelavx64x1( r0b0a, r0b0b, r0b0c, r0b0d, currB, &accum1); resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32(resultStorage[RowColToOffset(0, c, n)], accum1); } } FORCEINLINE void BlockHandlerAVX::HandleBlock128x4(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage, VectorT* /*subtractMe*/) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 128, 4, k); int aOffset2 = RowToColOffsetRewrittenA(startRow, currBlock + 1, 128, 4, k); short* currA = &newA[aOffset]; short* currA2 = &newA[aOffset2]; LOADAVX_128x4; LOADAVX2_128x4; //#pragma omp parallel for for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 128, n)]; short* currB2 = &B[RowToColOffsetRewrittenB(c, currBlock + 1, 128, n)]; //The gain comes when we have all the row values loaded up //together and we multiply them all times each column, saving m_rowsPerBlock column //loads. __m256i accum1 = _mm256_set1_epi16(0); __m256i accum2 = _mm256_set1_epi16(0); __m256i accum3 = _mm256_set1_epi16(0); __m256i accum4 = _mm256_set1_epi16(0); __m256i accum5 = _mm256_set1_epi16(0); __m256i accum6 = _mm256_set1_epi16(0); __m256i accum7 = _mm256_set1_epi16(0); __m256i accum8 = _mm256_set1_epi16(0); kernelavx128x4( r0b0a, r0b0b, r0b0c, r0b0d, r0b0e, r0b0f, r0b0g, r0b0h, r1b0a, r1b0b, r1b0c, r1b0d, r1b0e, r1b0f, r1b0g, r1b0h, r2b0a, r2b0b, r2b0c, r2b0d, r2b0e, r2b0f, r2b0g, r2b0h, r3b0a, r3b0b, r3b0c, r3b0d, r3b0e, r3b0f, r3b0g, r3b0h, currB, &accum1, &accum2, &accum3, &accum4); if (blockCnt > 1) { kernelavx128x4( r0b0a2, r0b0b2, r0b0c2, r0b0d2, r0b0e2, r0b0f2, r0b0g2, r0b0h2, r1b0a2, r1b0b2, r1b0c2, r1b0d2, r1b0e2, r1b0f2, r1b0g2, r1b0h2, r2b0a2, r2b0b2, r2b0c2, r2b0d2, r2b0e2, r2b0f2, r2b0g2, r2b0h2, r3b0a2, r3b0b2, r3b0c2, r3b0d2, r3b0e2, r3b0f2, r3b0g2, r3b0h2, currB2, &accum5, &accum6, &accum7, &accum8); } resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( 
resultStorage[RowColToOffset(0, c, n)], _mm256_add_epi32(accum1, accum5)); resultStorage[RowColToOffset(1, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(1, c, n)], _mm256_add_epi32(accum2, accum6)); resultStorage[RowColToOffset(2, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(2, c, n)], _mm256_add_epi32(accum3, accum7)); resultStorage[RowColToOffset(3, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(3, c, n)], _mm256_add_epi32(accum4, accum8)); } } FORCEINLINE void BlockHandlerAVX::HandleBlock128x1(int currBlock, int startRow, int k, int n, short* newA, short* B, int blockCnt, __m256i* resultStorage, VectorT* /*subtractMe*/) { int aOffset = RowToColOffsetRewrittenA(startRow, currBlock, 128, 4, k); int aOffset2 = RowToColOffsetRewrittenA(startRow, currBlock + 1, 128, 4, k); short* currA = &newA[aOffset]; short* currA2 = &newA[aOffset2]; LOADAVX_128x1; LOADAVX2_128x1; //#pragma omp parallel for for (int c = 0; c < n; ++c) { short* currB = &B[RowToColOffsetRewrittenB(c, currBlock, 128, n)]; short* currB2 = &B[RowToColOffsetRewrittenB(c, currBlock + 1, 128, n)]; //The gain comes when we have all the row values loaded up //together and we multiply them all times each column, saving m_rowsPerBlock column //loads. 
__m256i accum1 = _mm256_set1_epi16(0); __m256i accum2 = _mm256_set1_epi16(0); kernelavx128x1( r0b0a, r0b0b, r0b0c, r0b0d, r0b0e, r0b0f, r0b0g, r0b0h, currB, &accum1); if (blockCnt > 1) { kernelavx128x1( r0b0a2, r0b0b2, r0b0c2, r0b0d2, r0b0e2, r0b0f2, r0b0g2, r0b0h2, currB2, &accum1); } resultStorage[RowColToOffset(0, c, n)] = _mm256_add_epi32( resultStorage[RowColToOffset(0, c, n)], _mm256_add_epi32(accum1, accum2)); } } FORCEINLINE void BlockHandlerAVX::kernelsse8x1(__m128i xmmRow0, short* B, __m128i* return1) { __m128i xmmCol0 = _mm_load_si128((__m128i*)B); __m128i result1 = _mm_madd_epi16(xmmRow0, xmmCol0); *return1 = result1; } FORCEINLINE void BlockHandlerAVX::kernelsse8x4(__m128i xmmRow0, __m128i xmmRow1, __m128i xmmRow2, __m128i xmmRow3, short* B, __m128i* return1, __m128i* return2, __m128i* return3, __m128i* return4) { __m128i xmmCol0 = _mm_load_si128((__m128i*)B); __m128i result1 = _mm_madd_epi16(xmmRow0, xmmCol0); __m128i result2 = _mm_madd_epi16(xmmRow1, xmmCol0); __m128i result3 = _mm_madd_epi16(xmmRow2, xmmCol0); __m128i result4 = _mm_madd_epi16(xmmRow3, xmmCol0); *return1 = result1; *return2 = result2; *return3 = result3; *return4 = result4; } FORCEINLINE void BlockHandlerAVX::kernelavx16x1(__m256i xmmRow0B0a, short* B, __m256i* return1) { __m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B); //Result for row 0 //Nomenclature: //r0b0axc0b0a means "Row zero block zero part A times column zero block zero part A. (Blocks > 8 take up > 1 __m256i each (xmm registers)) __m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a); *return1 = r0b0axc0b0a; } FORCEINLINE void BlockHandlerAVX::kernelavx16x4(__m256i xmmRow0B0a, __m256i xmmRow1B0a, __m256i xmmRow2B0a, __m256i xmmRow3B0a, short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4) { __m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B); //Result for row 0 //Nomenclature: //r0b0axc0b0a means "Row zero block zero part A times column zero block zero part A. 
(Blocks > 8 take up > 1 __m256i each (xmm registers)) __m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a); //Result for row 1 __m256i r1b0axc0b0a = _mm256_madd_epi16(xmmRow1B0a, xmmCol0B0a); //Result for row 2 __m256i r2b0axc0b0a = _mm256_madd_epi16(xmmRow2B0a, xmmCol0B0a); //Result for row 3 __m256i r3b0axc0b0a = _mm256_madd_epi16(xmmRow3B0a, xmmCol0B0a); *return1 = r0b0axc0b0a; *return2 = r1b0axc0b0a; *return3 = r2b0axc0b0a; *return4 = r3b0axc0b0a; } FORCEINLINE void BlockHandlerAVX::kernelavx32x1( __m256i xmmRow0B0a, __m256i xmmRow0B0b, short* B, __m256i* return1) { __m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B); __m256i xmmCol0B0b = _mm256_load_si256((__m256i*)B + 1); //Result for row 0 //Nomenclature: //r0b0axc0b0a means "Row zero block zero part A times column zero block zero part A. (Blocks > 8 take up > 1 __m256i each (xmm registers)) __m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a); __m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b); __m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b); *return1 = result1a; } FORCEINLINE void BlockHandlerAVX::kernelavx32x4( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow3B0a, __m256i xmmRow3B0b, short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4) { __m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B); __m256i xmmCol0B0b = _mm256_load_si256((__m256i*)B + 1); //Result for row 0 //Nomenclature: //r0b0axc0b0a means "Row zero block zero part A times column zero block zero part A. 
(Blocks > 8 take up > 1 __m256i each (xmm registers)) __m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a); __m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b); __m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b); //Result for row 1 __m256i r1b0axc0b0a = _mm256_madd_epi16(xmmRow1B0a, xmmCol0B0a); __m256i r1b0bxc0b0b = _mm256_madd_epi16(xmmRow1B0b, xmmCol0B0b); __m256i result2a = _mm256_add_epi32(r1b0axc0b0a, r1b0bxc0b0b); //Result for row 2 __m256i r2b0axc0b0a = _mm256_madd_epi16(xmmRow2B0a, xmmCol0B0a); __m256i r2b0bxc0b0b = _mm256_madd_epi16(xmmRow2B0b, xmmCol0B0b); __m256i result3a = _mm256_add_epi32(r2b0axc0b0a, r2b0bxc0b0b); //Result for row 3 __m256i r3b0axc0b0a = _mm256_madd_epi16(xmmRow3B0a, xmmCol0B0a); __m256i r3b0bxc0b0b = _mm256_madd_epi16(xmmRow3B0b, xmmCol0B0b); __m256i result4a = _mm256_add_epi32(r3b0axc0b0a, r3b0bxc0b0b); *return1 = result1a; *return2 = result2a; *return3 = result3a; *return4 = result4a; } FORCEINLINE void BlockHandlerAVX::kernelavx64x1( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d, short* B, __m256i* return1) { __m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B); __m256i xmmCol0B0b = _mm256_load_si256((__m256i*)B + 1); __m256i xmmCol0B0c = _mm256_load_si256((__m256i*)B + 2); __m256i xmmCol0B0d = _mm256_load_si256((__m256i*)B + 3); __m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a); __m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b); __m256i r0b0cxc0b0c = _mm256_madd_epi16(xmmRow0B0c, xmmCol0B0c); __m256i r0b0dxc0b0d = _mm256_madd_epi16(xmmRow0B0d, xmmCol0B0d); __m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b); __m256i result1b = _mm256_add_epi32(r0b0cxc0b0c, r0b0dxc0b0d); __m256i result1ab = _mm256_add_epi32(result1a, result1b); *return1 = result1ab; //std::cout << "Returning " << u.i[0] << " + " << u.i[4] << "(" << u.i[0] + u.i[4] << ") for first row" << std::endl; } FORCEINLINE void BlockHandlerAVX::kernelavx64x4( __m256i 
xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d, __m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow1B0c, __m256i xmmRow1B0d, __m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow2B0c, __m256i xmmRow2B0d, __m256i xmmRow3B0a, __m256i xmmRow3B0b, __m256i xmmRow3B0c, __m256i xmmRow3B0d, short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4) { __m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B); __m256i xmmCol0B0b = _mm256_load_si256((__m256i*)B + 1); __m256i xmmCol0B0c = _mm256_load_si256((__m256i*)B + 2); __m256i xmmCol0B0d = _mm256_load_si256((__m256i*)B + 3); //Result for row 0 //Nomenclature: //r0b0axc0b0a means "Row zero block zero part A times column zero block zero part A. (Blocks > 8 take up > 1 __m256i each (xmm registers)) __m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a); __m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b); __m256i r0b0cxc0b0c = _mm256_madd_epi16(xmmRow0B0c, xmmCol0B0c); __m256i r0b0dxc0b0d = _mm256_madd_epi16(xmmRow0B0d, xmmCol0B0d); __m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b); __m256i result1b = _mm256_add_epi32(r0b0cxc0b0c, r0b0dxc0b0d); __m256i result1ab = _mm256_add_epi32(result1a, result1b); //Result for row 1 __m256i r1b0axc0b0a = _mm256_madd_epi16(xmmRow1B0a, xmmCol0B0a); __m256i r1b0bxc0b0b = _mm256_madd_epi16(xmmRow1B0b, xmmCol0B0b); __m256i r1b0cxc0b0c = _mm256_madd_epi16(xmmRow1B0c, xmmCol0B0c); __m256i r1b0dxc0b0d = _mm256_madd_epi16(xmmRow1B0d, xmmCol0B0d); __m256i result2a = _mm256_add_epi32(r1b0axc0b0a, r1b0bxc0b0b); __m256i result2b = _mm256_add_epi32(r1b0cxc0b0c, r1b0dxc0b0d); __m256i result2ab = _mm256_add_epi32(result2a, result2b); //Result for row 2 __m256i r2b0axc0b0a = _mm256_madd_epi16(xmmRow2B0a, xmmCol0B0a); __m256i r2b0bxc0b0b = _mm256_madd_epi16(xmmRow2B0b, xmmCol0B0b); __m256i r2b0cxc0b0c = _mm256_madd_epi16(xmmRow2B0c, xmmCol0B0c); __m256i r2b0dxc0b0d = _mm256_madd_epi16(xmmRow2B0d, xmmCol0B0d); __m256i result3a = 
_mm256_add_epi32(r2b0axc0b0a, r2b0bxc0b0b); __m256i result3b = _mm256_add_epi32(r2b0cxc0b0c, r2b0dxc0b0d); __m256i result3ab = _mm256_add_epi32(result3a, result3b); //Result for row 3 __m256i r3b0axc0b0a = _mm256_madd_epi16(xmmRow3B0a, xmmCol0B0a); __m256i r3b0bxc0b0b = _mm256_madd_epi16(xmmRow3B0b, xmmCol0B0b); __m256i r3b0cxc0b0c = _mm256_madd_epi16(xmmRow3B0c, xmmCol0B0c); __m256i r3b0dxc0b0d = _mm256_madd_epi16(xmmRow3B0d, xmmCol0B0d); __m256i result4a = _mm256_add_epi32(r3b0axc0b0a, r3b0bxc0b0b); __m256i result4b = _mm256_add_epi32(r3b0cxc0b0c, r3b0dxc0b0d); __m256i result4ab = _mm256_add_epi32(result4a, result4b); *return1 = result1ab; *return2 = result2ab; *return3 = result3ab; *return4 = result4ab; } FORCEINLINE void BlockHandlerAVX::kernelavx128x1( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d, __m256i xmmRow0B0e, __m256i xmmRow0B0f, __m256i xmmRow0B0g, __m256i xmmRow0B0h, short* B, __m256i* return1) { __m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B); __m256i xmmCol0B0b = _mm256_load_si256((__m256i*)(B + 16)); __m256i xmmCol0B0c = _mm256_load_si256((__m256i*)(B + 32)); __m256i xmmCol0B0d = _mm256_load_si256((__m256i*)(B + 48)); __m256i xmmCol0B0e = _mm256_load_si256((__m256i*)(B + 64)); __m256i xmmCol0B0f = _mm256_load_si256((__m256i*)(B + 80)); __m256i xmmCol0B0g = _mm256_load_si256((__m256i*)(B + 96)); __m256i xmmCol0B0h = _mm256_load_si256((__m256i*)(B + 112)); //Result for row 0 //Nomenclature: //r0b0axc0b0a means "Row zero block zero part A times column zero block zero part A. 
(Blocks > 8 take up > 1 __m256i each (xmm registers)) __m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a); __m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b); __m256i r0b0cxc0b0c = _mm256_madd_epi16(xmmRow0B0c, xmmCol0B0c); __m256i r0b0dxc0b0d = _mm256_madd_epi16(xmmRow0B0d, xmmCol0B0d); __m256i r0b0exc0b0e = _mm256_madd_epi16(xmmRow0B0e, xmmCol0B0e); __m256i r0b0fxc0b0f = _mm256_madd_epi16(xmmRow0B0f, xmmCol0B0f); __m256i r0b0gxc0b0g = _mm256_madd_epi16(xmmRow0B0g, xmmCol0B0g); __m256i r0b0hxc0b0h = _mm256_madd_epi16(xmmRow0B0h, xmmCol0B0h); __m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b); __m256i result1b = _mm256_add_epi32(r0b0cxc0b0c, r0b0dxc0b0d); __m256i result1c = _mm256_add_epi32(r0b0exc0b0e, r0b0fxc0b0f); __m256i result1d = _mm256_add_epi32(r0b0gxc0b0g, r0b0hxc0b0h); __m256i result1ab = _mm256_add_epi32(result1a, result1b); __m256i result1cd = _mm256_add_epi32(result1c, result1d); __m256i result1abcd = _mm256_add_epi32(result1ab, result1cd); *return1 = result1abcd; //std::cout << "Returning " << u.i[0] << " + " << u.i[4] << "(" << u.i[0] + u.i[4] << ") for first row" << std::endl; } FORCEINLINE void BlockHandlerAVX::kernelavx128x4( __m256i xmmRow0B0a, __m256i xmmRow0B0b, __m256i xmmRow0B0c, __m256i xmmRow0B0d, __m256i xmmRow0B0e, __m256i xmmRow0B0f, __m256i xmmRow0B0g, __m256i xmmRow0B0h, __m256i xmmRow1B0a, __m256i xmmRow1B0b, __m256i xmmRow1B0c, __m256i xmmRow1B0d, __m256i xmmRow1B0e, __m256i xmmRow1B0f, __m256i xmmRow1B0g, __m256i xmmRow1B0h, __m256i xmmRow2B0a, __m256i xmmRow2B0b, __m256i xmmRow2B0c, __m256i xmmRow2B0d, __m256i xmmRow2B0e, __m256i xmmRow2B0f, __m256i xmmRow2B0g, __m256i xmmRow2B0h, __m256i xmmRow3B0a, __m256i xmmRow3B0b, __m256i xmmRow3B0c, __m256i xmmRow3B0d, __m256i xmmRow3B0e, __m256i xmmRow3B0f, __m256i xmmRow3B0g, __m256i xmmRow3B0h, short* B, __m256i* return1, __m256i* return2, __m256i * return3, __m256i* return4) { __m256i xmmCol0B0a = _mm256_load_si256((__m256i*)B); __m256i xmmCol0B0b = 
_mm256_load_si256((__m256i*)(B + 16)); __m256i xmmCol0B0c = _mm256_load_si256((__m256i*)(B + 32)); __m256i xmmCol0B0d = _mm256_load_si256((__m256i*)(B + 48)); __m256i xmmCol0B0e = _mm256_load_si256((__m256i*)(B + 64)); __m256i xmmCol0B0f = _mm256_load_si256((__m256i*)(B + 80)); __m256i xmmCol0B0g = _mm256_load_si256((__m256i*)(B + 96)); __m256i xmmCol0B0h = _mm256_load_si256((__m256i*)(B + 112)); //Result for row 0 //Nomenclature: //r0b0axc0b0a means "Row zero block zero part A times column zero block zero part A. (Blocks > 8 take up > 1 __m256i each (xmm registers)) __m256i r0b0axc0b0a = _mm256_madd_epi16(xmmRow0B0a, xmmCol0B0a); __m256i r0b0bxc0b0b = _mm256_madd_epi16(xmmRow0B0b, xmmCol0B0b); __m256i r0b0cxc0b0c = _mm256_madd_epi16(xmmRow0B0c, xmmCol0B0c); __m256i r0b0dxc0b0d = _mm256_madd_epi16(xmmRow0B0d, xmmCol0B0d); __m256i r0b0exc0b0e = _mm256_madd_epi16(xmmRow0B0e, xmmCol0B0e); __m256i r0b0fxc0b0f = _mm256_madd_epi16(xmmRow0B0f, xmmCol0B0f); __m256i r0b0gxc0b0g = _mm256_madd_epi16(xmmRow0B0g, xmmCol0B0g); __m256i r0b0hxc0b0h = _mm256_madd_epi16(xmmRow0B0h, xmmCol0B0h); __m256i result1a = _mm256_add_epi32(r0b0axc0b0a, r0b0bxc0b0b); __m256i result1b = _mm256_add_epi32(r0b0cxc0b0c, r0b0dxc0b0d); __m256i result1c = _mm256_add_epi32(r0b0exc0b0e, r0b0fxc0b0f); __m256i result1d = _mm256_add_epi32(r0b0gxc0b0g, r0b0hxc0b0h); __m256i result1ab = _mm256_add_epi32(result1a, result1b); __m256i result1cd = _mm256_add_epi32(result1c, result1d); __m256i result1abcd = _mm256_add_epi32(result1ab, result1cd); //Result for row 1 __m256i r1b0axc0b0a = _mm256_madd_epi16(xmmRow1B0a, xmmCol0B0a); __m256i r1b0bxc0b0b = _mm256_madd_epi16(xmmRow1B0b, xmmCol0B0b); __m256i r1b0cxc0b0c = _mm256_madd_epi16(xmmRow1B0c, xmmCol0B0c); __m256i r1b0dxc0b0d = _mm256_madd_epi16(xmmRow1B0d, xmmCol0B0d); __m256i r1b0exc0b0e = _mm256_madd_epi16(xmmRow1B0e, xmmCol0B0e); __m256i r1b0fxc0b0f = _mm256_madd_epi16(xmmRow1B0f, xmmCol0B0f); __m256i r1b0gxc0b0g = _mm256_madd_epi16(xmmRow1B0g, xmmCol0B0g); 
__m256i r1b0hxc0b0h = _mm256_madd_epi16(xmmRow1B0h, xmmCol0B0h); __m256i result2a = _mm256_add_epi32(r1b0axc0b0a, r1b0bxc0b0b); __m256i result2b = _mm256_add_epi32(r1b0cxc0b0c, r1b0dxc0b0d); __m256i result2c = _mm256_add_epi32(r1b0exc0b0e, r1b0fxc0b0f); __m256i result2d = _mm256_add_epi32(r1b0gxc0b0g, r1b0hxc0b0h); __m256i result2ab = _mm256_add_epi32(result2a, result2b); __m256i result2cd = _mm256_add_epi32(result2c, result2d); __m256i result2abcd = _mm256_add_epi32(result2ab, result2cd); //Result for row 2 __m256i r2b0axc0b0a = _mm256_madd_epi16(xmmRow2B0a, xmmCol0B0a); __m256i r2b0bxc0b0b = _mm256_madd_epi16(xmmRow2B0b, xmmCol0B0b); __m256i r2b0cxc0b0c = _mm256_madd_epi16(xmmRow2B0c, xmmCol0B0c); __m256i r2b0dxc0b0d = _mm256_madd_epi16(xmmRow2B0d, xmmCol0B0d); __m256i r2b0exc0b0e = _mm256_madd_epi16(xmmRow2B0e, xmmCol0B0e); __m256i r2b0fxc0b0f = _mm256_madd_epi16(xmmRow2B0f, xmmCol0B0f); __m256i r2b0gxc0b0g = _mm256_madd_epi16(xmmRow2B0g, xmmCol0B0g); __m256i r2b0hxc0b0h = _mm256_madd_epi16(xmmRow2B0h, xmmCol0B0h); __m256i result3a = _mm256_add_epi32(r2b0axc0b0a, r2b0bxc0b0b); __m256i result3b = _mm256_add_epi32(r2b0cxc0b0c, r2b0dxc0b0d); __m256i result3c = _mm256_add_epi32(r2b0exc0b0e, r2b0fxc0b0f); __m256i result3d = _mm256_add_epi32(r2b0gxc0b0g, r2b0hxc0b0h); __m256i result3ab = _mm256_add_epi32(result3a, result3b); __m256i result3cd = _mm256_add_epi32(result3c, result3d); __m256i result3abcd = _mm256_add_epi32(result3ab, result3cd); //Result for row 3 __m256i r3b0axc0b0a = _mm256_madd_epi16(xmmRow3B0a, xmmCol0B0a); __m256i r3b0bxc0b0b = _mm256_madd_epi16(xmmRow3B0b, xmmCol0B0b); __m256i r3b0cxc0b0c = _mm256_madd_epi16(xmmRow3B0c, xmmCol0B0c); __m256i r3b0dxc0b0d = _mm256_madd_epi16(xmmRow3B0d, xmmCol0B0d); __m256i r3b0exc0b0e = _mm256_madd_epi16(xmmRow3B0e, xmmCol0B0e); __m256i r3b0fxc0b0f = _mm256_madd_epi16(xmmRow3B0f, xmmCol0B0f); __m256i r3b0gxc0b0g = _mm256_madd_epi16(xmmRow3B0g, xmmCol0B0g); __m256i r3b0hxc0b0h = _mm256_madd_epi16(xmmRow3B0h, 
xmmCol0B0h); __m256i result4a = _mm256_add_epi32(r3b0axc0b0a, r3b0bxc0b0b); __m256i result4b = _mm256_add_epi32(r3b0cxc0b0c, r3b0dxc0b0d); __m256i result4c = _mm256_add_epi32(r3b0exc0b0e, r3b0fxc0b0f); __m256i result4d = _mm256_add_epi32(r3b0gxc0b0g, r3b0hxc0b0h); __m256i result4ab = _mm256_add_epi32(result4a, result4b); __m256i result4cd = _mm256_add_epi32(result4c, result4d); __m256i result4abcd = _mm256_add_epi32(result4ab, result4cd); //Now we can just add horizontally *return1 = result1abcd; *return2 = result2abcd; *return3 = result3abcd; *return4 = result4abcd; } }}}
postgres_fmt_plug.c
/* PostgreSQL MD5 challenge-response cracker patch for JtR. Hacked together
 * during October of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * Use Ettercap to get PostgreSQL MD5 challenge-response pairs in JtR format.
 * E.g. ettercap -Tq -r /home/user/sample.pcap
 *
 * Input format:
 * $postgres$user*salt*hash
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
 * and Copyright magnum 2013,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 */

/* Standard JtR plugin stanza: the same file is included three ways. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_postgres;
#elif FMT_REGISTERS_H
john_register_one(&fmt_postgres);
#else

#include <string.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // scaled on K8-dual HT
#endif
#endif

#include "md5.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL "postgres"
#define FORMAT_NAME "PostgreSQL C/R"
#define FORMAT_TAG "$postgres$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
/* Old, deprecated tag spelling; rewritten to FORMAT_TAG by prepare(). */
#define FORMAT_TAG2 "$postgre$"
#define FORMAT_TAG2_LEN (sizeof(FORMAT_TAG2)-1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_NONE
#define MAX_USERNAME_LEN 64
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

static struct fmt_tests postgres_tests[] = {
	{"$postgres$postgres*f063f05d*1d586cc8d137e5f1733f234d224393e8", "openwall"},
	{"$postgres$postgres*c31803a2*1c4e11fb51835c3bbe9851ec91ec1375", "password"},
	/* $postgre$ is supported but deprecated */
	{"$postgre$postgres*684697c8*bf2a64f35feba7bf1b633d60393c1356", "openwall"},
	/* $postgres$ with longer user name */
	{"$postgres$Twelve_chars*55393156*c01df9affa7573ef32ec143759f3e005", "HookFish__2"},
	{"$postgres$postgres*65687433*b782eca219ad84b58f26d25e19a1bbc9", "thisisalongstring"},
	{"$postgres$postgres*33374273*77e0016f1b92cdea7291ab0ed21798b8", "string with space"},
	{"$postgres$postgres*6f734f37*d5451e93f6ac9a0d30336ec106e91cf5", "123456789"},
	{"$postgres$postgres*3348654b*0f0f46a3dfebf45f4320d2edeabc318f", ""},
	{NULL}
};

/* Per-candidate plaintexts and their computed digests, sized in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Salt is the user name plus the 4 raw challenge bytes. */
static struct custom_salt {
	unsigned char user[MAX_USERNAME_LEN + 1];
	unsigned char salt[4];
} *cur_salt;

/* Allocate per-candidate buffers; with OpenMP, scale key count by thread count. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Validate "$postgres$user*salt(8 hex)*hash(32 hex)"; returns 1 if parseable. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	const char *p;
	int extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	/* Check hash */
	if (!(p = strrchr(ciphertext, '*')))
		return 0;
	if (hexlenl(&p[1], &extra) != 2*BINARY_SIZE || extra)
		return 0;
	/* Check salt: step back over "*xxxxxxxx" to the separator before it */
	p -= 9;
	if (*p != '*')
		return 0;
	if (hexlenl(&p[1], 0) != 8)
		return 0;
	/* Check username length */
	if (p - ciphertext - FORMAT_TAG_LEN > MAX_USERNAME_LEN)
		return 0;
	return 1;
}

/* Rewrite the deprecated "$postgre$" tag to "$postgres$" when valid. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	static char out[FORMAT_TAG_LEN + sizeof(struct custom_salt) + 2*BINARY_SIZE +2+1];

	/* Replace deprecated tag */
	if (*split_fields[1] && !strncmp(split_fields[1], FORMAT_TAG2, FORMAT_TAG2_LEN)) {
		snprintf(out, sizeof(out), "%s%s", FORMAT_TAG, &split_fields[1][FORMAT_TAG2_LEN]);
		if (valid(out, self))
			return out;
	}
	return split_fields[1];
}

/* Parse user name and 4-byte challenge out of the ciphertext.
 * Returns a pointer to a static buffer (standard JtR get_salt contract). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;
	ctcopy += FORMAT_TAG_LEN; /* skip over "$postgres$" */
	p = strtokm(ctcopy, "*");
	memset(&cs, 0, sizeof(cs));
	strnzcpy((char*)cs.user, p, MAX_USERNAME_LEN + 1);
	p = strtokm(NULL, "*");
	for (i = 0; i < 4; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Decode the trailing 32 hex digits into the 16 raw target-digest bytes. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash accessors over the first computed word (JtR hash-table hooks). */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Write len bytes of str as 2*len lowercase hex characters into out. */
static inline void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int i;
	for (i = 0; i < len; ++i) {
		out[0] = itoa16[str[i]>>4];
		out[1] = itoa16[str[i]&0xF];
		out += 2;
	}
}

/* PostgreSQL C/R: MD5(hex(MD5(password . user)) . challenge) per candidate. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;
		unsigned char out[32];

		/* Inner digest: MD5(password . user) */
		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		MD5_Update(&ctx, cur_salt->user, strlen((char*)cur_salt->user));
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
		/* Outer digest: MD5(hex(inner) . 4-byte challenge) */
		hex_encode((unsigned char*)crypt_out[index], 16, out);
		MD5_Init(&ctx);
		MD5_Update(&ctx, out, 32);
		MD5_Update(&ctx, cur_salt->salt, 4);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

/* Quick scan: compare only the first ARCH_SIZE bytes; cmp_one() confirms. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate plaintext, truncated to PLAINTEXT_LENGTH. */
static void postgres_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format registration record consumed by the JtR core. */
struct fmt_main fmt_postgres = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		{ FORMAT_TAG, FORMAT_TAG2 },
		postgres_tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		postgres_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_binop__bset_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bset_int8 // A.*B function (eWiseMult): GB_AemultB__bset_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bset_int8 // C+=b function (dense accum): GB_Cdense_accumb__bset_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_int8 // C=scalar+B GB_bind1st__bset_int8 // C=scalar+B' GB_bind1st_tran__bset_int8 // C=A+scalar GB_bind2nd__bset_int8 // C=A'+scalar GB_bind2nd_tran__bset_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_BITSET (aij, bij, int8_t, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITSET (x, y, int8_t, 8) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_INT8 || GxB_NO_BSET_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bset_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bset_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bset_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bset_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
// (tail of the eWiseAdd kernel GB_AaddB__bset_int8, whose header lies before
// this chunk; code reproduced unchanged -- it finishes the slice-pointer
// declaration started on the previous line, expands the add template, and
// returns)
*klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise multiply with z = bitset(x,y) on int8 entries.  The mask M,
// the sparsity format of C, and the parallel task partition (TaskList) are
// all precomputed by the caller; the heavy lifting is in GB_emult_template.c.
GrB_Info GB_AemultB__bset_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice bookkeeping consumed (and filled in) by the template below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = bitset (x, Bx [p]) for every entry present in the bitmap Bb
// (GBB is true for entries present; entries absent in B are skipped).
GrB_Info GB_bind1st__bset_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = GB_BITSET (x, bij, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = bitset (Ax [p], y) for every entry present in the bitmap Ab.
GrB_Info GB_bind2nd__bset_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = GB_BITSET (aij, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = GB_BITSET (x, aij, int8_t, 8) ; \
}

// C = bitset (x, A') : transpose A while applying the operator with the
// scalar x bound to the first argument.
GrB_Info GB_bind1st_tran__bset_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels (generated-code convention)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = GB_BITSET (aij, y, int8_t, 8) ; \
}

// C = bitset (A', y) : transpose A while applying the operator with the
// scalar y bound to the second argument.
GrB_Info GB_bind2nd_tran__bset_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SpatialConvolutionMM.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionMM.c"
#else

#include <ATen/div_rtn.h>

// Validates kernel/stride/padding arguments and the shapes of input, weight,
// bias and (optionally) gradOutput for the 2-D convolution below.  Raises via
// THError/THArgCheck on any violation.  weight may be NULL only when
// weight_nullable is nonzero (used by accGradParameters with gradWeight==NULL).
static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
    THTensor *input, THTensor *gradOutput,
    THTensor *weight, THTensor *bias,
    int kH, int kW, int dH, int dW, int padH, int padW,
    int weight_nullable) {
  THArgCheck(kW > 0 && kH > 0, 9,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);

  if (weight != NULL) {
    THNN_ARGCHECK(!weight->is_empty() && (weight->dim() == 2 || weight->dim() == 4), 5, weight,
                  "non-empty 2D or 4D weight tensor expected, but got: %s");
    if (bias != NULL) {
      THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size(0));
    }
  } else if (!weight_nullable) {
    THError("weight tensor is expected to be non-nullable");
  }

  // dim indices for (channels, height, width); shifted by one for a batched
  // 4-D input where dim 0 is the batch.
  int ndim = input->dim();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  THNN_ARGCHECK(!input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
                "non-empty 3D or 4D input tensor expected but got: %s");

  int64_t inputHeight = input->size(dimh);
  int64_t inputWidth = input->size(dimw);

  int64_t exactInputHeight = inputHeight + 2 * padH;
  int64_t exactInputWidth = inputWidth + 2 * padW;

  if (exactInputHeight < kH || exactInputWidth < kW) {
    // NOTE(review): kH/kW are ints formatted with %ld here; this matches the
    // historical TH code but is technically a format mismatch on LP64.
    THError("Calculated padded input size per channel: (%ld x %ld). "
            "Kernel size: (%ld x %ld). Kernel size can't be greater than actual input size",
            exactInputHeight, exactInputWidth, kH, kW);
  }

  // div_rtn rounds toward negative infinity, matching the output-size formula
  // used by the forward pass.
  int64_t outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1;
  int64_t outputWidth = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1;

  if (outputWidth < 1 || outputHeight < 1) {
    THError("Given input size per channel: (%ld x %ld). "
            "Calculated output size per channel: (%ld x %ld). Output size is too small",
            inputHeight, inputWidth, outputHeight, outputWidth);
  }

  if (weight != NULL) {
    // 2-D weight is already flattened to (nOutputPlane, nInputPlane*kH*kW);
    // recover nInputPlane for the input-channel check.
    int64_t nInputPlane = weight->size(1);
    if (weight->dim() == 2) {
      nInputPlane /= (kH * kW);
    }
    THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
  }

  if (gradOutput != NULL) {
    if (weight != NULL) {
      int64_t nOutputPlane = weight->size(0);
      THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    } else if (bias != NULL) {
      int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
      THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    }
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

// Returns a contiguous 2-D view of weight, flattening a 4-D
// (nOut, nIn, kH, kW) tensor to (nOut, nIn*kH*kW) for use with GEMM.
// Always returns a NEW tensor reference; the caller must THTensor_(free) it.
static THTensor* THNN_(newViewWeightMM2d)(THTensor *weight) {
  weight = THTensor_(newContiguous)(weight);
  if (weight->dim() == 4) {
    int64_t s1 = weight->size(0);
    int64_t s2 = weight->size(1) * weight->size(2) * weight->size(3);
    THTensor *old_weight = weight;
    weight = THTensor_(newWithStorage2d)(THTensor_getStoragePtr(weight), weight->storage_offset(),
                                         s1, -1, s2, -1);
    THTensor_(free)(old_weight);
  }
  return weight;
}

// Forward pass for a single (unbatched) frame: im2col the input into finput,
// seed the output with the bias (or zeros), then one GEMM:
//   output2d += weight * finput.
static void THNN_(SpatialConvolutionMM_updateOutput_frame)(
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          int kW, int kH, int dW, int dH, int padW, int padH,
          int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
          int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  int64_t i;
  THTensor *output2d;

  // im2col: finput becomes (nInputPlane*kH*kW, outputHeight*outputWidth)
  THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
                       nInputPlane, inputWidth, inputHeight,
                       outputWidth, outputHeight);

  // 2-D alias over the same storage as output (no copy)
  output2d = THTensor_(newWithStorage2d)(THTensor_getStoragePtr(output), output->storage_offset(),
                                         nOutputPlane, -1,
                                         outputHeight*outputWidth, -1);
  if (bias) {
    // broadcast bias[i] into output plane i before the accumulating GEMM
    for(i = 0; i < nOutputPlane; i++)
        THVector_(fill)
          (THStorage_(data)(THTensor_getStoragePtr(output)) + output->storage_offset() + output->stride(0) * i,
           THTensor_(get1d)(bias, i), outputHeight*outputWidth);
  } else {
    THTensor_(zero)(output);
  }

  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  THTensor_(free)(output2d);
}

// Public forward entry point.  Handles both 3-D (single image) and 4-D
// (batched) inputs; batches are processed in parallel with OpenMP, each
// thread working on its own input/output/finput slice (weight is read-only).
void THNN_(SpatialConvolutionMM_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          THTensor *fgradInput,
          int kW, int kH,
          int dW, int dH,
          int padW, int padH)
{
  weight = THNN_(newViewWeightMM2d)(weight);

  THNN_(SpatialConvolutionMM_shapeCheck)
    (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, 0);

  input = THTensor_(newContiguous)(input);
  int ndim = input->dim();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  int64_t nInputPlane = input->size(dimf);
  int64_t inputHeight = input->size(dimh);
  int64_t inputWidth = input->size(dimw);
  int64_t nOutputPlane = weight->size(0);
  int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
  int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;

  if(input->dim() == 3)
  {
    THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);

    THNN_(SpatialConvolutionMM_updateOutput_frame)
      (input, output, weight, bias, finput,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

    THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      // per-thread views into the batch dimension
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(SpatialConvolutionMM_updateOutput_frame)
        (input_t, output_t, weight, bias, finput_t,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }

  // release the local references created above (newContiguous / view)
  THTensor_(free)(input);
  THTensor_(free)(weight);
}

// Backward (w.r.t. input) for one frame: one GEMM to build the column
// gradients, then col2im accumulation back into gradInput:
//   fgradInput = weight^T * gradOutput2d ; gradInput = col2im(fgradInput).
static void THNN_(SpatialConvolutionMM_updateGradInput_frame)(
          THTensor *gradInput,
          THTensor *gradOutput,
          THTensor *weight,
          THTensor *fgradInput,
          int kW, int kH,
          int dW, int dH,
          int padW, int padH)
{
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)
    (THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
     gradOutput->size(0), -1,
     gradOutput->size(1)*gradOutput->size(2), -1);
  // beta = 0: fgradInput is overwritten, not accumulated
  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  THTensor_(free)(gradOutput2d);

  THTensor_(zero)(gradInput);

  THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH,
                      padW, padH,
                      gradInput->size(0), gradInput->size(2), gradInput->size(1),
                      gradOutput->size(2), gradOutput->size(1));
}

// Public backward-input entry point: computes gradInput = conv_transpose of
// gradOutput with weight.  Batched inputs are parallelized over the batch.
void THNN_(SpatialConvolutionMM_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THTensor *finput,
          THTensor *fgradInput,
          int kW, int kH,
          int dW, int dH,
          int padW, int padH)
{
  weight = THNN_(newViewWeightMM2d)(weight);

  THNN_(SpatialConvolutionMM_shapeCheck)
    (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, 0);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);

  // depending on the BLAS library, fgradInput (result tensor) might
  // be left uninitialized on zero alpha, which might lead to weird behavior
  // hence, to be safe, zero it
  THTensor_(zero)(fgradInput);

  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 0, 1);

  if(input->dim() == 3)
  {
    THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput,
                                                      tweight, fgradInput,
                                                      kW, kH, dW, dH, padW, padH);
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t,
                                                        tweight, fgradInput_t,
                                                        kW, kH, dW, dH, padW, padH);

      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }

  THTensor_(free)(tweight);
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}

// Parameter gradients for one frame:
//   gradWeight += scale * gradOutput2d * finput^T   (one GEMM)
//   gradBias[i] += scale * sum of gradOutput plane i (explicit reduction)
static void THNN_(SpatialConvolutionMM_accGradParameters_frame)(
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,
          real scale)
{
  int64_t i;
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)
    (THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
     gradOutput->size(0), -1,
     gradOutput->size(1)*gradOutput->size(2), -1);

  if (gradWeight) {
    THTensor *tfinput = THTensor_(new)();
    THTensor_(transpose)(tfinput, finput, 0, 1);
    THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
    THTensor_(free)(tfinput);
  }

  if (gradBias) {
    for(i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++)
    {
      int64_t k;
      real sum = 0;
      real *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
      for(k = 0; k < gradOutput2d->size(1); k++)
        sum += data[k];
      (THStorage_(data)(THTensor_getStoragePtr(gradBias)) + gradBias->storage_offset())[i] += scale*sum;
    }
  }

  THTensor_(free)(gradOutput2d);
}

// Public parameter-gradient entry point.  gradWeight and/or gradBias may be
// NULL (finput is only required when gradWeight is given).  NOTE: the batch
// loop here is intentionally serial -- both frames accumulate into the same
// gradWeight/gradBias tensors.
void THNN_(SpatialConvolutionMM_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput, // can be NULL if gradWeight = NULL
          THTensor *fgradInput,
          int kW, int kH,
          int dW, int dH,
          int padW, int padH,
          accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  if (gradWeight) {
    THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
    gradWeight = THNN_(newViewWeightMM2d)(gradWeight);
  }
  if (gradBias) {
    THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
  }

  // weight_nullable = 1: gradWeight may legitimately be NULL here
  THNN_(SpatialConvolutionMM_shapeCheck)
    (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, 1);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  if(input->dim() == 3)
  {
    THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight,
                                                        gradBias, finput, scale);
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

    for(t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = NULL;
      if (gradWeight) {
        finput_t = THTensor_(newSelect)(finput, 0, t);
      }

      THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight,
                                                          gradBias, finput_t, scale);

      THTensor_(free)(gradOutput_t);
      if (gradWeight) {
        THTensor_(free)(finput_t);
      }
    }
  }

  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  if (gradWeight) {
    THTensor_(free)(gradWeight);
  }
}

#endif
3DConvolution_teams.c
/** * 3DConvolution.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <omp.h> #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define ERROR_THRESHOLD 0.5 /* Problem size */ #define NI 512 #define NJ 512 #define NK 512 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void conv3D(DATA_TYPE* A, DATA_TYPE* B) { int i, j, k; DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33; c11 = +2; c21 = +5; c31 = -8; c12 = -3; c22 = +6; c32 = -9; c13 = +4; c23 = +7; c33 = +10; for (j = 1; j < NJ - 1; ++j) { for (i = 1; i < NI - 1; ++i) { for (k = 1; k < NK -1; ++k) { B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)] + c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)] + c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]; } } } } void conv3D_OMP(DATA_TYPE* A, DATA_TYPE* B) { int i, j, k; DATA_TYPE c11, 
c12, c13, c21, c22, c23, c31, c32, c33; c11 = +2; c21 = +5; c31 = -8; c12 = -3; c22 = +6; c32 = -9; c13 = +4; c23 = +7; c33 = +10; #pragma omp target teams distribute parallel for map(to:A[:NI*NJ*NK]) map(from:B[:NI*NJ*NK]) for (j = 1; j < NJ - 1; ++j) { for (i = 1; i < NI - 1; ++i) { int k; for (k = 1; k < NK -1; ++k) { B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)] + c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)] + c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]; } } } } void init(DATA_TYPE* A) { int i, j, k; for (i = 0; i < NI; ++i) { for (j = 0; j < NJ; ++j) { for (k = 0; k < NK; ++k) { A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13); } } } } void compareResults(DATA_TYPE* B, DATA_TYPE* B_GPU) { int i, j, k, fail; fail = 0; // Compare result from cpu and gpu... 
for (i = 1; i < NI - 1; ++i) { for (j = 1; j < NJ - 1; ++j) { for (k = 1; k < NK - 1; ++k) { if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_GPU[i*(NK * NJ) + j*NK + k]) > ERROR_THRESHOLD) { fail++; } } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* B_GPU; A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE)); B_GPU = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE)); fprintf(stdout, ">> Three dimensional (3D) convolution <<\n"); init(A); t_start = rtclock(); conv3D_OMP(A, B_GPU); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); conv3D(A, B); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(B, B_GPU); free(A); free(B); free(B_GPU); return 0; }
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
*
*-----------------------------------------------------------------------*/

# define HLINE "-------------------------------------------------------------\n"

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

/* the three benchmark arrays; statically allocated so no allocator or
   first-touch cost appears inside the timed loops */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET];

/* per-kernel timing accumulators: [0]=Copy [1]=Scale [2]=Add [3]=Triad */
static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* bytes moved per kernel iteration: Copy/Scale read+write 2 arrays,
   Add/Triad touch 3 */
static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
    };

extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif

/* Runs the four STREAM kernels NTIMES each, reporting the best bandwidth
   per kernel (first iteration excluded) and validating the results. */
int main()
    {
    int quantum, checktick();
    int BytesPerWord;
    int k;
    ssize_t j;
    STREAM_TYPE scalar;
    double t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */

    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n", BytesPerWord);

    printf(HLINE);
#ifdef N
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocessor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
	BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
	BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
	(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
	(3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
    /* report the thread count the runtime grants inside a parallel region */
#pragma omp parallel
    {
#pragma omp master
	{
	    k = omp_get_num_threads();
	    printf ("Number of Threads requested = %i\n",k);
        }
    }
#endif

#ifdef _OPENMP
    /* double-check by having every thread atomically bump a counter */
	k = 0;
#pragma omp parallel
#pragma omp atomic
		k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
    /* parallel initialization also gives NUMA-friendly first-touch placement */
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
	    a[j] = 1.0;
	    b[j] = 2.0;
	    c[j] = 0.0;
	}

    printf(HLINE);

    if ( (quantum = checktick()) >= 1)
	printf("Your clock granularity/precision appears to be "
	    "%d microseconds.\n", quantum);
    else {
	printf("Your clock granularity appears to be "
	    "less than one microsecond.\n");
	quantum = 1;
    }

    /* rough per-test duration estimate used only for the advice below */
    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
		a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order"
	" of %d microseconds.\n", (int) t );
    printf(" (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");

    printf(HLINE);

    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
	{
	/* kernel 0: Copy  c = a */
	times[0][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Copy();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j];
#endif
	times[0][k] = mysecond() - times[0][k];

	/* kernel 1: Scale  b = scalar*c */
	times[1][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    b[j] = scalar*c[j];
#endif
	times[1][k] = mysecond() - times[1][k];

	/* kernel 2: Add  c = a + b */
	times[2][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Add();
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j]+b[j];
#endif
	times[2][k] = mysecond() - times[2][k];

	/* kernel 3: Triad  a = b + scalar*c */
	times[3][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    a[j] = b[j]+scalar*c[j];
#endif
	times[3][k] = mysecond() - times[3][k];
	}

    /* --- SUMMARY --- */

    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
	{
	for (j=0; j<4; j++)
	    {
	    avgtime[j] = avgtime[j] + times[j][k];
	    mintime[j] = MIN(mintime[j], times[j][k]);
	    maxtime[j] = MAX(maxtime[j], times[j][k]);
	    }
	}

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j=0; j<4; j++) {
		avgtime[j] = avgtime[j]/(double)(NTIMES-1);

		/* best rate uses the fastest (minimum) time, per the run rules */
		printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
	       1.0E-06 * bytes[j]/mintime[j],
	       avgtime[j],
	       mintime[j],
	       maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    return 0;
}

# define M 20

/* Estimates the timer granularity in microseconds by sampling M distinct
   clock values and returning the minimum positive delta between them. */
int
checktick()
    {
    int i, minDelta, Delta;
    double t1, t2, timesfound[M];

/* Collect a sequence of M unique time values from the system. */

    for (i = 0; i < M; i++) {
	t1 = mysecond();
	while( ((t2=mysecond()) - t1) < 1.0E-6 )
	    ;
	timesfound[i] = t2;
	}

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

    minDelta = 1000000;
    for (i = 1; i < M; i++) {
	Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
	minDelta = MIN(minDelta, MAX(Delta,0));
	}

   return(minDelta);
    }

/* A gettimeofday routine to give access to the wall
   clock timer on most UNIX-like systems. */

#include <sys/time.h>

/* Wall-clock time in seconds with microsecond resolution. */
double mysecond()
{
        struct timeval tp;
        struct timezone tzp;

        (void)gettimeofday(&tp,&tzp);
        return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/* Replays the kernel sequence on scalar values to compute the expected
   final contents of a, b and c, then checks that the average absolute
   error of each array is within a type-dependent epsilon. */
void checkSTREAMresults ()
{
	STREAM_TYPE aj,bj,cj,scalar;
	STREAM_TYPE aSumErr,bSumErr,cSumErr;
	STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
	double epsilon;
	ssize_t	j;
	int	k,ierr,err;

    /* reproduce initialization */
	aj = 1.0;
	bj = 2.0;
	cj = 0.0;
    /* a[] is modified during timing check */
	aj = 2.0E0 * aj;
    /* now execute timing loop */
	scalar = 3.0;
	for (k=0; k<NTIMES; k++)
        {
            cj = aj;
            bj = scalar*cj;
            cj = aj+bj;
            aj = bj+scalar*cj;
        }

    /* accumulate deltas between observed and expected results */
	aSumErr = 0.0;
	bSumErr = 0.0;
	cSumErr = 0.0;
	for (j=0; j<STREAM_ARRAY_SIZE; j++) {
		aSumErr += abs(a[j] - aj);
		bSumErr += abs(b[j] - bj);
		cSumErr += abs(c[j] - cj);
		// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj);	// MCCALPIN
	}
	aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
	cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

	/* epsilon scaled to the precision of STREAM_TYPE */
	if (sizeof(STREAM_TYPE) == 4) {
		epsilon = 1.e-6;
	}
	else if (sizeof(STREAM_TYPE) == 8) {
		epsilon = 1.e-13;
	}
	else {
		printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
		epsilon = 1.e-6;
	}

	err = 0;
	if (abs(aAvgErr/aj) > epsilon) {
		err++;
		printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(a[j]/aj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,aj,a[j],abs((aj-a[j])/aAvgErr));
				}
#endif
			}
		}
		printf(" For array a[], %d errors were found.\n",ierr);
	}
	if (abs(bAvgErr/bj) > epsilon) {
		err++;
		printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
		printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(b[j]/bj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,bj,b[j],abs((bj-b[j])/bAvgErr));
				}
#endif
			}
		}
		printf(" For array b[], %d errors were found.\n",ierr);
	}
	if (abs(cAvgErr/cj) > epsilon) {
		err++;
		printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
		printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
		printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
		ierr = 0;
		for (j=0; j<STREAM_ARRAY_SIZE; j++) {
			if (abs(c[j]/cj-1.0) > epsilon) {
				ierr++;
#ifdef VERBOSE
				if (ierr < 10) {
					printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
						j,cj,c[j],abs((cj-c[j])/cAvgErr));
				}
#endif
			}
		}
		printf(" For array c[], %d errors were found.\n",ierr);
	}
	if (err == 0) {
		printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
	}
#ifdef VERBOSE
	printf ("Results Validation Verbose Results: \n");
	printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
	printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
	printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}

#ifdef TUNED
/* stubs for "tuned" versions of the kernels -- replace with optimized
   implementations; results must then be labelled per the run rules */
void tuned_STREAM_Copy()
{
	ssize_t j;
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j];
}

void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    b[j] = scalar*c[j];
}

void tuned_STREAM_Add()
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    c[j] = a[j]+b[j];
}

void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
	ssize_t j;
#pragma omp parallel for
	for (j=0; j<STREAM_ARRAY_SIZE; j++)
	    a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
omp_parallel_reduction.c
// RUN: %libomp-compile-and-run
//
// Exercises the reduction() clause on "#pragma omp parallel for" for every
// reduction operator on int (and +/- on double): +, -, *, &&, ||, &, |, ^.
// schedule(dynamic,1) spreads single iterations across threads, so a broken
// reduction implementation is very likely to produce a wrong final value.
// LOOPCOUNT and REPETITIONS come from omp_testsuite.h.
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */

/* Runs all reduction sub-tests once.
 * Returns 1 (true) if every sub-test passed, 0 otherwise. */
int test_omp_parallel_reduction()
{
  int sum;
  int known_sum;
  double dsum;
  double dknown_sum;
  double dt = 0.5; /* base of geometric row for + and - test*/
  double rounding_error = 1.E-9;
  int diff;
  double ddiff;
  int product;
  int known_product;
  int logic_and;
  int logic_or;
  int bit_and;
  int bit_or;
  int exclusiv_bit_or;
  int logics[LOOPCOUNT];
  int i;
  double dpt;
  int result; /* number of failed sub-tests */

  sum = 0;
  dsum = 0;
  product = 1;
  logic_and = 1;
  logic_or = 0;
  bit_and = 1;
  bit_or = 0;
  exclusiv_bit_or = 0;
  result = 0;
  dt = 1./3.; /* overrides the 0.5 initializer above */
  known_sum = (LOOPCOUNT*(LOOPCOUNT+1))/2; /* Gauss: 1+2+...+LOOPCOUNT */

  /* Tests for integers */
  /* reduction(+): 1+2+...+LOOPCOUNT must equal the closed form. */
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:sum)
  for (i=1;i<=LOOPCOUNT;i++) {
    sum = sum + i;
  }
  if (known_sum != sum) {
    result++;
    fprintf(stderr,"Error in sum with integers: Result was %d instead of %d\n",sum,known_sum);
  }

  /* reduction(-): start at the full sum and subtract every i -> must hit 0. */
  diff = (LOOPCOUNT*(LOOPCOUNT+1))/2;
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)
  for (i=1;i<=LOOPCOUNT;++i) {
    diff = diff - i;
  }
  if (diff != 0) {
    result++;
    fprintf(stderr,"Error in difference with integers: Result was %d instead of 0.\n",diff);
  }

  /* Tests for doubles */
  /* Geometric series: sum_{i=0}^{DIGITS-1} dt^i == (1 - dt^DIGITS)/(1 - dt). */
  dsum = 0;
  dpt = 1;
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dpt *= dt; /* dpt = dt^DOUBLE_DIGITS after the loop */
  }
  dknown_sum = (1-dpt)/(1-dt);
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dsum += pow(dt,i);
  }
  if ( fabs(dsum-dknown_sum) > rounding_error ) {
    result++;
    fprintf(stderr,"Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",dsum,dknown_sum, dsum-dknown_sum);
  }

  /* reduction(-) on doubles: subtract the whole series from its closed form. */
  dpt = 1;
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dpt *= dt;
  }
  fprintf(stderr,"\n"); /* NOTE(review): stray blank line in the log; kept as-is */
  ddiff = (1-dpt)/(1-dt);
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)
  for (i=0;i<DOUBLE_DIGITS;++i) {
    ddiff -= pow(dt,i);
  }
  if ( fabs(ddiff) > rounding_error) {
    result++;
    fprintf(stderr,"Error in Difference with doubles: Result was %E instead of 0.0\n",ddiff);
  }

  /* Tests for product of integers */
  /* reduction(*): 1*2*...*MAX_FACTOR must equal MAX_FACTOR! (10! here). */
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(*:product)
  for (i=1;i<=MAX_FACTOR;i++) {
    product *= i;
  }
  known_product = KNOWN_PRODUCT;
  if (known_product != product) {
    result++;
    fprintf(stderr,"Error in Product with integers: Result was %d instead of %d\n\n",product,known_product);
  }

  /* Tests for logical and */
  /* Part 1: all operands 1 -> result must stay true. */
  for (i=0;i<LOOPCOUNT;i++) {
    logics[i] = 1;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)
  for (i=0;i<LOOPCOUNT;++i) {
    logic_and = (logic_and && logics[i]);
  }
  if (!logic_and) {
    result++;
    fprintf(stderr,"Error in logic AND part 1.\n");
  }

  /* Part 2: a single 0 in the middle must force the result to false. */
  logic_and = 1;
  logics[LOOPCOUNT/2] = 0;
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)
  for (i=0;i<LOOPCOUNT;++i) {
    logic_and = logic_and && logics[i];
  }
  if (logic_and) {
    result++;
    fprintf(stderr,"Error in logic AND part 2.\n");
  }

  /* Tests for logical or */
  /* Part 1: all operands 0 -> result must stay false. */
  for (i=0;i<LOOPCOUNT;i++) {
    logics[i] = 0;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)
  for (i=0;i<LOOPCOUNT;++i) {
    logic_or = logic_or || logics[i];
  }
  if (logic_or) {
    result++;
    fprintf(stderr,"Error in logic OR part 1.\n");
  }

  /* Part 2: a single 1 in the middle must force the result to true. */
  logic_or = 0;
  logics[LOOPCOUNT/2] = 1;
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)
  for (i=0;i<LOOPCOUNT;++i) {
    logic_or = logic_or || logics[i];
  }
  if (!logic_or) {
    result++;
    fprintf(stderr,"Error in logic OR part 2.\n");
  }

  /* Tests for bitwise and */
  /* Part 1: all bits 1 -> AND over all elements must remain 1. */
  for (i=0;i<LOOPCOUNT;++i) {
    logics[i] = 1;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)
  for (i=0;i<LOOPCOUNT;++i) {
    bit_and = (bit_and & logics[i]);
  }
  if (!bit_and) {
    result++;
    fprintf(stderr,"Error in BIT AND part 1.\n");
  }

  /* Part 2: one zero element must clear the result. */
  bit_and = 1;
  logics[LOOPCOUNT/2] = 0;
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)
  for (i=0;i<LOOPCOUNT;++i) {
    bit_and = bit_and & logics[i];
  }
  if (bit_and) {
    result++;
    fprintf(stderr,"Error in BIT AND part 2.\n");
  }

  /* Tests for bitwise or */
  /* Part 1: all zero -> OR must stay 0. */
  for (i=0;i<LOOPCOUNT;i++) {
    logics[i] = 0;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)
  for (i=0;i<LOOPCOUNT;++i) {
    bit_or = bit_or | logics[i];
  }
  if (bit_or) {
    result++;
    fprintf(stderr,"Error in BIT OR part 1\n");
  }

  /* Part 2: one set element must set the result. */
  bit_or = 0;
  logics[LOOPCOUNT/2] = 1;
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)
  for (i=0;i<LOOPCOUNT;++i) {
    bit_or = bit_or | logics[i];
  }
  if (!bit_or) {
    result++;
    fprintf(stderr,"Error in BIT OR part 2\n");
  }

  /* Tests for bitwise xor */
  /* Part 1: all zero -> XOR must stay 0. */
  for (i=0;i<LOOPCOUNT;i++) {
    logics[i] = 0;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)
  for (i=0;i<LOOPCOUNT;++i) {
    exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
  }
  if (exclusiv_bit_or) {
    result++;
    fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n");
  }

  /* Part 2: exactly one set element -> XOR of all must be 1. */
  exclusiv_bit_or = 0;
  logics[LOOPCOUNT/2] = 1;
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)
  for (i=0;i<LOOPCOUNT;++i) {
    exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
  }
  if (!exclusiv_bit_or) {
    result++;
    fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n");
  }

  /*printf("\nResult:%d\n",result);*/
  return (result==0);
}

/* Repeats the whole suite REPETITIONS times; the process exit code is the
 * number of failed repetitions (0 == success, as lit expects). */
int main()
{
  int i;
  int num_failed = 0;

  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_parallel_reduction()) {
      num_failed++;
    }
  }
  return num_failed;
}
mem.c
/* Copyright 2013-2015. The Regents of the University of California.
 * Copyright 2016. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
 *
 * Device-memory bookkeeping: every device allocation is registered in a
 * global singly-linked list (mem_list). When the "memcache" is enabled,
 * blocks released via mem_device_free() are kept in the list marked as
 * free and handed out again by mem_device_malloc() instead of calling the
 * real device allocator. All list access is serialized with
 * "#pragma omp critical" (unnamed, so all sections share one lock).
 */

#include <stdbool.h>
#include <assert.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "misc/misc.h"
#include "misc/debug.h"

#include "mem.h"

/* Global switch: when true, freed device blocks are cached for reuse. */
bool memcache = true;

/* Permanently disable the memcache (freed blocks are returned to the
 * device allocator immediately). */
void memcache_off(void)
{
	memcache = false;
}

/* One tracked allocation. */
struct mem_s {

	const void* ptr;	/* start address of the allocation */
	size_t len;		/* size in bytes */
	bool device;		/* true if this is device memory */
	bool free;		/* true while cached (released by the user, reusable) */
	int device_id;		/* device the block belongs to */
	int thread_id;		/* OpenMP thread that allocated/last reused it (-1 without OpenMP) */
	struct mem_s* next;	/* next entry in mem_list */
};

/* Head of the global allocation list. */
static struct mem_s* mem_list = NULL;

/* True if 'ptr' points anywhere inside the block described by 'rptr'.
 * NOTE(review): arithmetic on void* is a GCC/Clang extension (treated as
 * char*); this file appears to rely on it. */
static bool inside_p(const struct mem_s* rptr, const void* ptr)
{
	return (ptr >= rptr->ptr) && (ptr < rptr->ptr + rptr->len);
}

/* Find the entry whose block contains 'ptr'; if 'remove' is true, also
 * unlink it from the list. Returns NULL if 'ptr' is not tracked. */
static struct mem_s* search(const void* ptr, bool remove)
{
	struct mem_s* rptr = NULL;

	#pragma omp critical
	{
		struct mem_s** nptr = &mem_list;

		while (true) {

			rptr = *nptr;

			if (NULL == rptr)
				break;

			if (inside_p(rptr, ptr)) {

				if (remove)
					*nptr = rptr->next;

				break;
			}

			nptr = &(rptr->next);
		}
	}

	return rptr;
}

/* Is 'rptr' a cached block usable for a request of 'size' bytes on device
 * 'dev' (and, if 'tid' != -1, owned by thread 'tid')? */
static bool free_check_p(const struct mem_s* rptr, size_t size, int dev, int tid)
{
	return (rptr->free
		&& (rptr->device_id == dev)
		&& (rptr->len >= size)
		&& ((0 == size) || (rptr->len <= 4 * size)) // small allocations shall not occupy large memory areas (turned off if requested size is 0)
		&& ((-1 == tid) || (rptr->thread_id == tid)));
}

/* Walk the list for a reusable block; returns a pointer to the LINK that
 * refers to the match (so the caller can unlink it), or to the terminating
 * NULL link if nothing matches. "unsafe": caller must already be inside
 * the critical section. */
static struct mem_s** find_free_unsafe(size_t size, int dev, int tid)
{
	struct mem_s* rptr = NULL;
	struct mem_s** nptr = &mem_list;

	while (true) {

		rptr = *nptr;

		if (NULL == rptr)
			break;

		if (free_check_p(rptr, size, dev, tid))
			break;

		nptr = &(rptr->next);
	}

	return nptr;
}

/* Locked lookup of a reusable cached block (any thread, tid == -1).
 * On success the entry is atomically marked in-use and returned;
 * it stays in the list. Returns NULL if nothing suitable is cached. */
static struct mem_s* find_free(size_t size, int dev)
{
	struct mem_s* rptr = NULL;

	#pragma omp critical
	{
		rptr = *find_free_unsafe(size, dev, -1);

		if (NULL != rptr)
			rptr->free = false;
	}

	return rptr;
}

/* Register a new allocation at the head of mem_list.
 * PTR_ALLOC/PTR_PASS are ownership-tracking helpers from misc/misc.h. */
static void insert(const void* ptr, size_t len, bool device, int dev)
{
	PTR_ALLOC(struct mem_s, nptr);

	nptr->ptr = ptr;
	nptr->len = len;
	nptr->device = device;
	nptr->device_id = dev;
#ifdef _OPENMP
	nptr->thread_id = omp_get_thread_num();
#else
	nptr->thread_id = -1;
#endif
	nptr->free = false;

	#pragma omp critical
	{
		nptr->next = mem_list;
		mem_list = PTR_PASS(nptr);
	}
}

/* Release all cached blocks for device 'dev' owned by the calling thread,
 * returning each one to the device allocator via 'device_free'.
 * One entry is unlinked per critical section, then freed outside the lock,
 * until no matching entry remains (size 0 disables the size filter). */
void memcache_clear(int dev, void (*device_free)(const void*x))
{
	struct mem_s* nptr = NULL;

	if (!memcache)
		return;

	do {
		#pragma omp critical
		{
#ifdef _OPENMP
			int tid = omp_get_thread_num();
#else
			int tid = -1;
#endif
			struct mem_s** rptr = find_free_unsafe(0, dev, tid);

			nptr = *rptr;

			// remove from list
			if (NULL != nptr)
				*rptr = nptr->next;
		}

		if (NULL != nptr) {

			assert(nptr->device);
			debug_printf(DP_DEBUG3, "Freeing %ld bytes. (DID: %d TID: %d)\n\n", nptr->len, nptr->device_id, nptr->thread_id);
			device_free(nptr->ptr);
			xfree(nptr);
		}

	} while (NULL != nptr);
}

/* True if 'ptr' lies inside a tracked DEVICE allocation.
 * NULL is never on the device. */
bool mem_ondevice(const void* ptr)
{
	if (NULL == ptr)
		return false;

	struct mem_s* p = search(ptr, false);
	bool r = ((NULL != p) && p->device);

	return r;
}

/* True if 'ptr' lies inside any tracked allocation (device flag ignored). */
bool mem_device_accessible(const void* ptr)
{
	struct mem_s* p = search(ptr, false);
	return (NULL != p);
}

/* Release a device block previously returned by mem_device_malloc().
 * With the memcache on, the entry stays in the list marked free for
 * reuse; otherwise it is unlinked (search(..., true)) and 'device_free'
 * is called immediately. 'ptr' must be the exact start of the block. */
void mem_device_free(void* ptr, void (*device_free)(const void* ptr))
{
	struct mem_s* nptr = search(ptr, !memcache);

	assert(NULL != nptr);
	assert(nptr->ptr == ptr);
	assert(nptr->device);

	if (memcache) {

		assert(!nptr->free);
		nptr->free = true;

	} else {

		device_free(ptr);
		xfree(nptr);
	}
}

/* Allocate 'size' bytes of device memory on 'device'. A suitable cached
 * block is reused when available (its ownership moves to the calling
 * thread, and it may be up to 4x larger than requested — see
 * free_check_p); otherwise 'device_alloc' is called and the new block is
 * registered. NOTE(review): 'size' is a long but flows into size_t
 * parameters; negative values would wrap — presumably callers never pass
 * them. */
void* mem_device_malloc(int device, long size, void* (*device_alloc)(size_t))
{
	if (memcache) {

		struct mem_s* nptr = find_free(size, device);

		if (NULL != nptr) {

			assert(nptr->device);
			assert(!nptr->free);
#ifdef _OPENMP
			nptr->thread_id = omp_get_thread_num();
#else
			nptr->thread_id = -1;
#endif
			return (void*)(nptr->ptr);
		}
	}

	void* ptr = device_alloc(size);
	insert(ptr, size, true, device);

	return ptr;
}
declare_reduction_codegen.c
// RUN: %clang_cc1 -verify -fopenmp -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls | FileCheck %s // RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls // RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls | FileCheck --check-prefix=CHECK-LOAD %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK: [[SSS_INT:.+]] = type { i32 } // CHECK-LOAD: [[SSS_INT:.+]] = type { i32 } #pragma omp declare reduction(+ : int, char : omp_out *= omp_in) // CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias) // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: store i32 [[MUL]], i32* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias) // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: store i32 [[MUL]], i32* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias) // CHECK: sext i8 // CHECK: sext i8 // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-NEXT: store i8 [[TRUNC]], i8* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias) // CHECK-LOAD: sext i8 // CHECK-LOAD: sext i8 // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } #pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = 15 + omp_orig) // CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias) // CHECK: [[ADD:%.+]] = fadd float // CHECK-NEXT: store float [[ADD]], float* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias) // CHECK: [[ADD:%.+]] 
= fadd float 1.5 // CHECK-NEXT: store float [[ADD]], float* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias) // CHECK-LOAD: [[ADD:%.+]] = fadd float // CHECK-LOAD-NEXT: store float [[ADD]], float* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias) // CHECK-LOAD: [[ADD:%.+]] = fadd float 1.5 // CHECK-LOAD-NEXT: store float [[ADD]], float* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } struct SSS { int field; #pragma omp declare reduction(+ : int, char : omp_out *= omp_in) // CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias) // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: store i32 [[MUL]], i32* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias) // CHECK: sext i8 // CHECK: sext i8 // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-NEXT: store i8 [[TRUNC]], i8* // CHECK-NEXT: ret void // CHECK-NEXT: } }; void init(struct SSS *priv, struct SSS orig); #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LABEL: @main // 
CHECK-LOAD-LABEL: @main int main() { #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } { #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } } return 0; } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias) // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: store i32 [[MUL]], i32* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias) // CHECK-LOAD: sext i8 // CHECK-LOAD: sext i8 // 
CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } #endif
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" /* Typedef declarations. 
*/ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w A l l o c a t e W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAllocateWand() allocates an initial drawing wand which is an opaque % handle required by the remaining drawing methods. % % The format of the DrawAllocateWand method is: % % DrawingWand DrawAllocateWand(const DrawInfo *draw_info,Image *image) % % A description of each parameter follows: % % o draw_info: Initial drawing defaults. Set to NULL to use defaults. % % o image: the image to draw on. % */ WandExport DrawingWand *DrawAllocateWand(const DrawInfo *draw_info,Image *image) { return(AcquireDrawingWand(draw_info,image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. % % The format of the MagickAverageImages method is: % % MagickWand *MagickAverageImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. 
% */ static MagickWand *CloneMagickWandFromImages(const MagickWand *wand, Image *images) { MagickWand *clone_wand; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand)); if (clone_wand == (MagickWand *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", images->filename); (void) memset(clone_wand,0,sizeof(*clone_wand)); clone_wand->id=AcquireWandId(); (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g", MagickWandId,(double) clone_wand->id); clone_wand->exception=AcquireExceptionInfo(); InheritException(clone_wand->exception,wand->exception); clone_wand->image_info=CloneImageInfo(wand->image_info); clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info); clone_wand->images=images; clone_wand->debug=IsEventLogging(); if (clone_wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name); clone_wand->signature=WandSignature; return(clone_wand); } WandExport MagickWand *MagickAverageImages(MagickWand *wand) { Image *average_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); average_image=EvaluateImages(wand->images,MeanEvaluateOperator, wand->exception); if (average_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,average_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelView() makes a copy of the specified pixel view. 
% % The format of the ClonePixelView method is: % % PixelView *ClonePixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelView *ClonePixelView(const PixelView *pixel_view) { PixelView *clone_view; register ssize_t i; assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (PixelView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); (void) memset(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g", PixelViewId,(double) clone_view->id); clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,pixel_view->exception); clone_view->view=CloneCacheView(pixel_view->view); clone_view->region=pixel_view->region; clone_view->number_threads=pixel_view->number_threads; for (i=0; i < (ssize_t) pixel_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) pixel_view->pixel_wands[i],pixel_view->region.width); clone_view->debug=pixel_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelView() deallocates memory associated with a pixel view. 
% % The format of the DestroyPixelView method is: % % PixelView *DestroyPixelView(PixelView *pixel_view, % const size_t number_wands,const size_t number_threads) % % A description of each parameter follows: % % o pixel_view: the pixel view. % % o number_wand: the number of pixel wands. % % o number_threads: number of threads. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport PixelView *DestroyPixelView(PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands, pixel_view->region.width,pixel_view->number_threads); pixel_view->view=DestroyCacheView(pixel_view->view); pixel_view->exception=DestroyExceptionInfo(pixel_view->exception); pixel_view->signature=(~WandSignature); RelinquishWandId(pixel_view->id); pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view); return(pixel_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferPixelViewIterator() iterates over three pixel views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel region is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. 
However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType DuplexTransferPixelViewIterator( PixelView *source,PixelView *duplex,PixelView *destination, DuplexTransferPixelViewMethod transfer,void *context) { #define DuplexTransferPixelViewTag "PixelView/DuplexTransfer" ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *magick_restrict duplex_indexes, *magick_restrict indexes; register const 
PixelPacket *magick_restrict duplex_pixels, *magick_restrict pixels; register IndexPacket *magick_restrict destination_indexes; register ssize_t x; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y, duplex->region.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) 
destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (transfer(source,duplex,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator) #endif proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag, progress++,source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewException() returns the severity, reason, and description of any % error that occurs when utilizing a pixel view. 
% % The format of the GetPixelViewException method is: % % char *GetPixelViewException(const PixelWand *pixel_view, % ExceptionType *severity) % % A description of each parameter follows: % % o pixel_view: the pixel pixel_view. % % o severity: the severity of the error is returned here. % */ WandExport char *GetPixelViewException(const PixelView *pixel_view, ExceptionType *severity) { char *description; assert(pixel_view != (const PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); assert(severity != (ExceptionType *) NULL); *severity=pixel_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent, sizeof(*description)); if (description == (char *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); *description='\0'; if (pixel_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->reason), MaxTextExtent); if (pixel_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MaxTextExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->description), MaxTextExtent); (void) ConcatenateMagickString(description,")",MaxTextExtent); } return(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w H e i g h t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewHeight() returns the pixel view height. % % The format of the GetPixelViewHeight method is: % % size_t GetPixelViewHeight(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewHeight(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.height); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewIterator() iterates over the pixel view in parallel and calls % your get method for each scanline of the view. The pixel region is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetPixelViewIterator method is: % % MagickBooleanType GetPixelViewIterator(PixelView *source, % GetPixelViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o get: the get callback method. % % o context: the user defined context. 
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag  "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  /*
    NOTE(review): the loop runs y from region.y up to region.height, not
    region.y+region.height — if region.y > 0 the last region.y rows are
    skipped; confirm against the matching Set/Update iterators.
  */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* per-thread pixel_wands row */

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    /* A failure on any thread cancels the remaining iterations. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    /* CMYK images carry the black channel in the index queue. */
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* PseudoClass images carry the colormap index in the index queue. */
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across the parallel team. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%
  G e t P i x e l V i e w P i x e l s                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewPixels() returns the pixel view pixel_wands.
%
%  The format of the GetPixelViewPixels method is:
%
%      PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  const int
    id = GetOpenMPThreadId();  /* wands are stored per OpenMP thread */

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w W a n d                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWand() returns the magick wand associated with the pixel view.
%
%  The format of the GetPixelViewWand method is:
%
%      MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w W i d t h                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewWidth() returns the pixel view width.
%
%  The format of the GetPixelViewWidth method is:
%
%      size_t GetPixelViewWidth(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w X                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewX() returns the pixel view x offset.
%
%  The format of the GetPixelViewX method is:
%
%      ssize_t GetPixelViewX(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l V i e w Y                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelViewY() returns the pixel view y offset.
%
%  The format of the GetPixelViewY method is:
%
%      ssize_t GetPixelViewY(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s P i x e l V i e w                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPixelView() returns MagickTrue if the parameter is verified as a pixel
%  view container.
%
%  The format of the IsPixelView method is:
%
%      MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
%  A description of each parameter follows:
%
%    o pixel_view: the pixel view.
% */ WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view) { size_t length; if (pixel_view == (const PixelView *) NULL) return(MagickFalse); if (pixel_view->signature != WandSignature) return(MagickFalse); length=strlen(PixelViewId); if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C l i p P a t h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickClipPathImage() clips along the named paths from the 8BIM profile, if % present. Later operations take effect inside the path. Id may be a number % if preceded with #, to work on a numbered path, e.g., "#1" to use the first % path. % % The format of the MagickClipPathImage method is: % % MagickBooleanType MagickClipPathImage(MagickWand *wand, % const char *pathname,const MagickBooleanType inside) % % A description of each parameter follows: % % o wand: the magick wand. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % */ WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand, const char *pathname,const MagickBooleanType inside) { return(MagickClipImagePath(wand,pathname,inside)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetFillAlpha() returns the alpha used when drawing using the fill % color or fill texture. Fully opaque is 1.0. 
%
%  The format of the DrawGetFillAlpha method is:
%
%      double DrawGetFillAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetFillOpacity(). */
  return(DrawGetFillOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w G e t S t r o k e A l p h a                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
%  The format of the DrawGetStrokeAlpha method is:
%
%      double DrawGetStrokeAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetStrokeOpacity(). */
  return(DrawGetStrokeOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w P e e k G r a p h i c W a n d                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPeekGraphicWand() returns the current drawing wand.
%
%  The format of the DrawPeekGraphicWand method is:
%
%      DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated alias for PeekDrawingWand(). */
  return(PeekDrawingWand(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w P o p G r a p h i c C o n t e x t                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPopGraphicContext() destroys the current drawing wand and returns to the
%  previously pushed drawing wand.  Multiple drawing wands may exist.  It is an
%  error to attempt to pop more drawing wands than have been pushed, and it is
%  proper form to pop all drawing wands which have been pushed.
%
%  The format of the DrawPopGraphicContext method is:
%
%      void DrawPopGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias for PopDrawingWand(); the status is discarded. */
  (void) PopDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w P u s h G r a p h i c C o n t e x t                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPushGraphicContext() clones the current drawing wand to create a new
%  drawing wand.  The original drawing wand(s) may be returned to by
%  invoking PopDrawingWand().  The drawing wands are stored on a drawing wand
%  stack.  For every Pop there must have already been an equivalent Push.
%
%  The format of the DrawPushGraphicContext method is:
%
%      void DrawPushGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias for PushDrawingWand(); the status is discarded. */
  (void) PushDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w S e t F i l l A l p h a                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetFillAlpha() sets the alpha to use when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawSetFillAlpha method is:
%
%      void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Deprecated alias for DrawSetFillOpacity(). */
  DrawSetFillOpacity(wand,fill_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D r a w S e t S t r o k e A l p h a                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
%  The format of the DrawSetStrokeAlpha method is:
%
%      void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
%    o stroke_alpha: stroke alpha.  The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Deprecated alias for DrawSetStrokeOpacity(). */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k C o l o r F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickColorFloodfillImage() changes the color value of any pixel that matches
%  target and is an immediate neighbor.  If the method FillToBorderMethod is
%  specified, the color value is changed for any neighbor pixel that does not
%  match the bordercolor member of image.
%
%  The format of the MagickColorFloodfillImage method is:
%
%      MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
%        const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.
For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    The seed pixel is read with x,y wrapped into the canvas via modulo.
    NOTE(review): ssize_t % size_t mixes signedness — behavior for negative
    x,y depends on the implicit conversion; confirm intended semantics.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k D e s c r i b e I m a g e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickDescribeImage() identifies an image by printing its attributes to the
%  file.  Attributes include the image width, height, size, and others.
%
%  The format of the MagickDescribeImage method is:
%
%      char *MagickDescribeImage(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated alias for MagickIdentifyImage(); caller frees the string. */
  return(MagickIdentifyImage(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k F l a t t e n I m a g e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickFlattenImages() merges a sequence of images.  This useful for
%  combining Photoshop layers into a single image.
%
%  The format of the MagickFlattenImages method is:
%
%      MagickWand *MagickFlattenImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *flatten_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  flatten_image=FlattenImages(wand->images,wand->exception);
  if (flatten_image == (Image *) NULL)
    return((MagickWand *) NULL);
  /* The result is returned as a new wand; the source wand is unchanged. */
  return(CloneMagickWandFromImages(wand,flatten_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k G e t I m a g e A t t r i b u t e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageAttribute() returns a value associated with the specified
%  property.  Use MagickRelinquishMemory() to free the value when you are
%  finished with it.
%
%  The format of the MagickGetImageAttribute method is:
%
%      char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated alias for MagickGetImageProperty(). */
  return(MagickGetImageProperty(wand,property));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
% + M a g i c k G e t I m a g e I n d e x                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageIndex() returns the index of the current image.
%
%  The format of the MagickGetImageIndex method is:
%
%      ssize_t MagickGetImageIndex(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated alias for MagickGetIteratorIndex(). */
  return(MagickGetIteratorIndex(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
% + M a g i c k G e t I m a g e C h a n n e l E x t r e m a                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageChannelExtrema() gets the extrema for one or more image
%  channels.
%
%  The format of the MagickGetImageChannelExtrema method is:
%
%      MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
%        const ChannelType channel,size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the image channel(s).
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
% + M a g i c k G e t I m a g e E x t r e m a                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageExtrema() gets the extrema for the image.
%
%  The format of the MagickGetImageExtrema method is:
%
%      MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
%        size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k G e t I m a g e M a t t e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageMatte() returns MagickTrue if the image has a matte channel
%  otherwise MagickFalse.
%
%  The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(wand->images->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k G e t I m a g e P i x e l s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, int, ssize_t,
%  float, or double in the order specified by map.
%
%  Suppose you want to extract the first scanline of a 640x480 image as
%  character data in red-green-blue order:
%
%      MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickGetImagePixels method is:
%
%      MagickBooleanType MagickGetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter
%      of a region of pixels you want to extract.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types are
%      expected to be normalized [0..1] otherwise [0..QuantumRange].  Choose
%      from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
%      LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined by
%      map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  /* Deprecated alias for MagickExportImagePixels(). */
  return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k G e t I m a g e S i z e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageSize() returns the image length in bytes.
%
%  The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Blob size of the current image, in bytes. */
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a p I m a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map_wand: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /*
    NOTE(review): map_wand is dereferenced without a NULL check, unlike
    wand which is asserted — confirm callers guarantee non-NULL.
  */
  if ((wand->images == (Image *) NULL) ||
      (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a t t e F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Fix: the original cloned a DrawInfo here and destroyed it at the end
    without ever using it — a pointless allocation; it has been removed.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* alpha 1.0 (opaque) maps to opacity 0; alpha 0.0 to QuantumRange. */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the quality
%  of a noisy image.  Each pixel is replaced by the median in a set of
%  neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  Image
    *median_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  median_image=MedianFilterImage(wand->images,radius,wand->exception);
  if (median_image == (Image *) NULL)
    return(MagickFalse);
  /* Replace the current image in the wand's list with the filtered image. */
  ReplaceImageInList(&wand->images,median_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M i n i m u m I m a g e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMinimumImages() returns the minimum intensity of an image sequence.
%
%  The format of the MagickMinimumImages method is:
%
%      MagickWand *MagickMinimumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  Image
    *minimum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  minimum_image=EvaluateImages(wand->images,MinEvaluateOperator,
    wand->exception);
  if (minimum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,minimum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M o d e I m a g e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickModeImage() makes each pixel the 'predominant color' of the
%  neighborhood of the specified radius.
%
%  The format of the MagickModeImage method is:
%
%      MagickBooleanType MagickModeImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *mode_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  mode_image=ModeImage(wand->images,radius,wand->exception);
  if (mode_image == (Image *) NULL)
    return(MagickFalse);
  /* Replace the current image in the wand's list with the result. */
  ReplaceImageInList(&wand->images,mode_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M o s a i c I m a g e s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMosaicImages() inlays an image sequence to form a single coherent
%  picture.  It returns a wand with each image in the sequence composited at
%  the location defined by the page offset of the image.
%
%  The format of the MagickMosaicImages method is:
%
%      MagickWand *MagickMosaicImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
% */
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *mosaic_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  /*
    Composite the sequence into one image and return it in a new wand; the
    source wand is unchanged.
  */
  mosaic_image=MosaicImages(wand->images,wand->exception);
  if (mosaic_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,mosaic_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k O p a q u e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
% */
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* Deprecated: relay directly to MagickPaintOpaqueImage(). */
  return(MagickPaintOpaqueImage(wand,target,fill,fuzz));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintFloodfillImage() changes the color value of any pixel that matches
%  target and is an immediate neighbor.  If the method FillToBorderMethod is
%  specified, the color value is changed for any neighbor pixel that does not
%  match the bordercolor member of image.
%
%  The format of the MagickPaintFloodfillImage method is:
%
%      MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
%        const ChannelType channel,const PixelWand *fill,const double fuzz,
%        const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
% */
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  /*
    Deprecated: relay to MagickFloodfillPaintImage() with invert=MagickFalse
    (paint pixels that match the target, not those that don't).
  */
  status=MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
% */
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* Convenience overload: operate on the default channels. */
  return(MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz));
}

WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated: relay to MagickOpaquePaintImageChannel() with
    invert=MagickFalse (paint matching pixels, not non-matching ones).
  */
  status=MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
% */
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /* Deprecated: relay to MagickTransparentPaintImage() with invert off. */
  return(MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R a d i a l B l u r I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRadialBlurImage() radial blurs an image.
%
%  The format of the MagickRadialBlurImage method is:
%
%      MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
%        const double angle)
%      MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
%        const ChannelType channel,const double angle)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the image channel(s).
%
%    o angle: the angle of the blur in degrees.
%
*/
WandExport MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
  const double angle)
{
  /* Deprecated name: "radial" blur is now called rotational blur. */
  return(MagickRotationalBlurImage(wand,angle));
}

WandExport MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
  const ChannelType channel,const double angle)
{
  /* Deprecated name: relay to the rotational-blur channel variant. */
  return(MagickRotationalBlurImageChannel(wand,channel,angle));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e c o l o r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRecolorImage() apply color transformation to an image.  The method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be used,
%  typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).  The matrix is similar to those used by Adobe Flash
%  except offsets are in column 6 rather than 5 (in support of CMYKA images)
%  and offsets are normalized (divide Flash offset by 255).
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *transform_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* A NULL matrix is rejected quietly rather than raising an exception. */
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  transform_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (transform_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,transform_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.  Use
%  a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *noise_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Filter the current image; on success the result replaces it in the wand's
    image list.
  */
  noise_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (noise_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,noise_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M a x i m u m I m a g e s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMaximumImages() returns the maximum intensity of an image sequence.
%
%  The format of the MagickMaximumImages method is:
%
%      MagickWand *MagickMaximumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *maximum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  /*
    Reduce the whole sequence with the Max evaluate operator; the result is a
    single image returned in a new wand (the source wand is unchanged).
  */
  maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator,
    wand->exception);
  if (maximum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,maximum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k S e t I m a g e A t t r i b u t e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageAttribute() associates a property with an image.
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
% */
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Options are stored as a single "format:key=value" string. */
  (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small amount.
%      The fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.  For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
% */
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /* Deprecated: relay to MagickPaintTransparentImage(). */
  return(MagickPaintTransparentImage(wand,target,alpha,fuzz));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns it
%  as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  /* Deprecated: relay to MagickGetImageRegion(). */
  return(MagickGetImageRegion(wand,width,height,x,y));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k S e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at the
%  location you specify.  The method returns MagickTrue on success otherwise
%  MagickFalse if an error is encountered.  The pixel data can be either char,
%  short int, int, ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to upload the first scanline of a 640x480 image from
%  character data in red-green-blue order:
%
%      MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickSetImagePixels method is:
%
%      MagickBooleanType MagickSetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        const void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter of a region
%      of pixels you want to define.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types are
%      expected to be normalized [0..1] otherwise [0..QuantumRange].  Choose from
%      these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel,
%      or DoublePixel.
%
%    o pixels: This array of values contain the pixel components as defined by
%      map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  /* Deprecated: relay to MagickImportImagePixels(). */
  return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k W r i t e I m a g e B l o b                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickWriteImageBlob() implements direct to memory image formats.
It
%  returns the image as a blob and its length.  Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  /* Deprecated: relay to MagickGetImageBlob(). */
  return(MagickGetImageBlob(wand,length));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
% */
/*
  Allocate one array of pixel wands per OpenMP thread (number_wands wands
  each); on any allocation failure, release everything built so far and
  return NULL.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero first so a partial failure can be unwound safely. */
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickCoreSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* The wand must be assigned before the cache view is acquired from it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  /* The view spans the full canvas; region x/y stay 0 from the memset. */
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickCoreSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    The wand must be assigned before the cache view is acquired from it;
    previously the view was acquired first, dereferencing the still-NULL
    pixel_view->wand left by the memset (see NewPixelView() for the correct
    ordering).
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelGetNextRow() returns the next row as an array of pixel wands from the
%  pixel iterator.
%
%  The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    number_wands;

  /* Deprecated: the row width reported by the iterator is discarded. */
  return(PixelGetNextIteratorRow(iterator,&number_wands));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i x e l I t e r a t o r G e t E x c e p t i o n                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelIteratorGetException() returns the severity, reason, and description of
%  any error that occurs when using other methods in this API.
%
%  The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const Pixeliterator *iterator,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  /* Deprecated: relay to PixelGetIteratorException(). */
  return(PixelGetIteratorException(iterator,severity));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your set method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.
The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Pixels are written directly, so the image must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  /*
    NOTE(review): the loop runs from region.y to region.height, not to
    region.y+region.height -- matches the sibling iterators in this file;
    confirm regions here always start at y == 0.
  */
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict pixels;

    /* Once any scanline fails, remaining iterations become no-ops. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* The callback fills this thread's pixel wands for the scanline. */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand colors back into the authentic pixel buffer. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferPixelViewIterator method is:
%
%      MagickBooleanType TransferPixelViewIterator(PixelView *source,
%        PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag  "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* Destination pixels are written directly, so it must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Load this thread's source wands from the source scanline. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      NOTE(review): the destination wands are seeded from the SOURCE scanline
      ('pixels'/'indexes'), not from 'destination_pixels' -- this pre-fills
      the destination for the callback, but assumes the source scanline is at
      least destination->region.width wide; confirm against callers.
    */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the (possibly modified) destination wands back to the image. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          NOTE(review): the exception is inherited from source->view even
          though the failed sync was on destination->view -- verify intent.
        */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U p d a t e P i x e l V i e w I t e r a t o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
%  your update method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdatePixelViewIterator method is:
%
%      MagickBooleanType UpdatePixelViewIterator(PixelView *source,
%        UpdatePixelViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are written back below, so the image must be DirectClass. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
  /* Each OpenMP thread works on whole scanlines using its own set of
     per-thread pixel wands (indexed by GetOpenMPThreadId()). */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  /* NOTE(review): y runs from region.y to region.height (not
     region.y+region.height) — same convention as the other iterators in this
     file; confirm against upstream before changing. */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict pixels;

    /* A failure in any thread cancels the remaining iterations. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Load the scanline into this thread's pixel wands. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Let the callback modify the wands in place. */
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the (possibly modified) wands back to the pixel cache. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Progress counter is shared; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
GB_binop__bxnor_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxnor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxnor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxnor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxnor_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxnor_uint16)
// A*D function (colscale):         GB (_AxD__bxnor_uint16)
// D*A function (rowscale):         GB (_DxB__bxnor_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__bxnor_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxnor_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxnor_uint16)
// C=scalar+B                       GB (_bind1st__bxnor_uint16)
// C=scalar+B'                      GB (_bind1st_tran__bxnor_uint16)
// C=A+scalar                       GB (_bind2nd__bxnor_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__bxnor_uint16)

// C type:   uint16_t
// A type:   uint16_t
// A pattern? 0
// B type:   uint16_t
// B pattern? 0

// BinaryOp: cij = ~((aij) ^ (bij))

// The macros below specialize the generic templates (#include'd in the
// kernels that follow) for the BXNOR operator on uint16.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ~((x) ^ (y)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXNOR || GxB_NO_UINT16 || GxB_NO_BXNOR_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BXNOR is none of those, so this ewise3-accum kernel is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxnor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the return above always fires); harmless quirk of
    // the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed alias of C's numeric array, written by the template below
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxnor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORKSPACE at the end of the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes these scalars for entries missing in A or B
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxnor_uint16)
(
    GrB_Matrix C,
    const
    int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxnor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for BXNOR (commutative), so the #else branch below
    // is the one that is compiled for this operator.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxnor_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxnor_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true for full)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = ~((x) ^ (bij)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxnor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t  y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = ~((aij) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ~((x) ^ (aij)) ;                  \
}

GrB_Info GB (_bind1st_tran__bxnor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB (_bind2nd_tran__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
tiling-1.c
/* matmul.c : Matrix Multiplication with tiling for openmp4 example */

#include <stdlib.h>
#include <math.h>
/* FIX: struct timespec is passed by value below; <time.h> declares it.
   Without this include the file does not compile. */
#include <time.h>

#define BLOCK_SIZE 16
/*  #define BLOCK_SIZE 32 */
#define NSECPERSEC 1000000000L

/* Row-major matrix whose storage is padded (stride, hpad) up to the next
   multiple of BLOCK_SIZE so the tiled kernel never reads out of bounds. */
typedef struct {
   int width;    /* logical number of columns              */
   int height;   /* logical number of rows                 */
   int stride;   /* padded row length (>= width)           */
   int hpad;     /* padded number of rows (>= height)      */
   float* elements;
} Matrix;

/* Correctly extract the number of nanoseconds from the two time structures */
long int get_nanosecs( struct timespec start_time, struct timespec end_time) {
   long int nanosecs;
   /* Borrow a second when the nanosecond field wrapped. */
   if ((end_time.tv_nsec-start_time.tv_nsec)<0) nanosecs =
      ((((long int) end_time.tv_sec- (long int) start_time.tv_sec )-1)*NSECPERSEC ) +
      ( NSECPERSEC + (long int) end_time.tv_nsec - (long int) start_time.tv_nsec) ;
   else nanosecs =
      (((long int) end_time.tv_sec- (long int) start_time.tv_sec )*NSECPERSEC ) +
      ( (long int) end_time.tv_nsec - (long int) start_time.tv_nsec );
   return nanosecs;
}

void simple_sgemm_tt(const int M,const int N,const int K,const float alpha,
    const float* A,const int LDA, const float* B,const int LDB,
    const float beta,float* C, const int LDC) ;
void simple_sgemm_tn(const int M,const int N,const int K,const float alpha,
    const float* A,const int LDA, const float* B,const int LDB,
    const float beta,float* C, const int LDC) ;
void tiled_sgemm_tt(const int M,const int N,const int K,const float alpha,
    const float*A, const int LDA, const float* B,const int LDB,
    const float beta,float* C, const int LDC) ;

/* Abort if any result element differs from the reference by more than 0.1%.
   Returns 1 when all elements match (it never returns on mismatch). */
int verify(float* v_res, float* v_ref, int len) {
    int passed = 1;
    int i;
    for (i = 0; i < len; ++i) {
        if (fabs(v_res[i] - v_ref[i]) > 0.001*v_ref[i]) {
            __builtin_abort ();
        }
    }
    return passed;
}

int main(int argc, char* argv[]){
   Matrix A,B,Bt,C,Cref;
   int a1,a2,a3,i,j;

   /* Deliberately NOT multiples of BLOCK_SIZE, to exercise edge handling. */
   a1 = 35;
   a2 = 28;
   a3 = 47;

   A.height = a1;
   A.width  = a2;
   A.stride = (((A.width-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
   A.hpad   = (((A.height-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
   A.elements = (float*)malloc(A.stride * A.hpad * sizeof(float));

   B.height = a2;
   B.width  = a3;
   B.stride = (((B.width-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
   B.hpad   = (((B.height-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
   B.elements = (float*)malloc(B.stride * B.hpad * sizeof(float));

   /* Bt is same as B but stored in column-major order */
   Bt.height = B.height;
   Bt.width  = B.width;
   Bt.stride = B.stride;
   Bt.hpad   = B.hpad;
   Bt.elements = (float*)malloc(Bt.stride * Bt.hpad * sizeof(float));

   C.height = a1;
   C.width  = a3;
   C.stride = (((C.width-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
   C.hpad   = (((C.height-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
   C.elements = (float*)malloc(C.stride * C.hpad * sizeof(float));

   Cref.height = a1;
   Cref.width  = a3;
   Cref.stride = (((Cref.width-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
   Cref.hpad   = (((Cref.height-1)/BLOCK_SIZE)+1) * BLOCK_SIZE;
   Cref.elements = (float*)malloc(Cref.stride * Cref.hpad * sizeof(float));

   /* Initialize A; padding cells are zero so tiles past the edge are inert. */
   for(i = 0; i < A.hpad ; i++)
      for(j = 0; j < A.stride; j++) {
         if (( j<A.width ) && (i<A.height)) {
            A.elements[i*A.stride + j] = (i % 3);
         } else {
            A.elements[i*A.stride + j] = 0.0;
         }
      }

   /* Initialize B and Bt */
   for(i = 0; i < B.hpad ; i++)
      for(j = 0; j < B.stride; j++) {
         if (( j<B.width ) && (i<B.height)) {
            B.elements[i*B.stride+j] = (j % 2);
            Bt.elements[j*Bt.stride+i] = B.elements[i*B.stride+j] ;
         } else {
            B.elements[i*B.stride+j] = 0.0;
            Bt.elements[j*Bt.stride+i] = 0.0;
         }
      }

   /* zero C, and Cref */
   for(i = 0; i < C.hpad; i++)
      for(j = 0; j < C.stride; j++) {
         C.elements[i*C.stride+j] = 0.0;
         Cref.elements[i*Cref.stride+j] = 0.0;
      }

   /* Reference result on the host, then the tiled offload version. */
   simple_sgemm_tt(A.height,B.width,B.height,1.0,A.elements,A.stride,
      B.elements,B.stride,1.0,Cref.elements,Cref.stride);
   tiled_sgemm_tt(A.height,B.width,B.height,1.0,A.elements,A.stride,
      B.elements,B.stride,1.0,C.elements,C.stride);

   verify(C.elements, Cref.elements, C.height * C.stride);

   free(A.elements);
   free(B.elements);
   free(Bt.elements);
   free(C.elements);
   free(Cref.elements);
   return 0;
}

/* Naive triple-loop SGEMM, both inputs row-major ("tt"); used as reference. */
void simple_sgemm_tt(const int M,const int N,const int K,const float alpha,
    const float* A,const int LDA, const float* B,const int LDB,
    const float beta,float* C, const int LDC) {
   /* A,B, and C are in row-major order */
   int c_row,c_col,inner;
   float sum;
   for (c_col = 0 ; c_col<N; c_col++ ) {
      for (c_row = 0 ; c_row<M; c_row++ ) {
         sum = 0.0 ;
         for (inner = 0 ; inner<K; inner++ ) {
            sum += A[c_row*LDA + inner] * B[inner*LDB + c_col] ;
         }
         C[c_row*LDC + c_col] = alpha*sum + beta*C[ c_row*LDC + c_col] ;
      }
   }
}

/***************************
  tiled_sgemm_tt:  Tiled matrix multiplication, offloaded with OpenMP 4
  target/teams.  Each team computes one BLOCK_SIZE x BLOCK_SIZE tile of C,
  staging tiles of A and B in team-local arrays As/Bs.
***************************/
void tiled_sgemm_tt(const int M, const int N, const int K, const float alpha,
    const float*A, const int LDA, const float*B, const int LDB,
    const float beta, float*C, const int LDC){

#pragma omp target teams map(to:A[M*K],B[K*N]) map(from:C[M*N])
#pragma omp distribute collapse(2)
   for (int C_row_start=0 ; C_row_start < M ; C_row_start+=BLOCK_SIZE)
      for (int C_col_start=0 ; C_col_start < N ; C_col_start+=BLOCK_SIZE) {

         // Each team has a local copy of these mini matrices
         float As[BLOCK_SIZE][BLOCK_SIZE];
         float Bs[BLOCK_SIZE][BLOCK_SIZE];

#pragma omp parallel
         {
            int C_row, C_col;
            /* NOTE(review): Cval is one scalar per thread, accumulated across
               the `omp for collapse(2)` loops below.  This is only correct if
               every thread owns exactly one (row,col) iteration and the loops
               distribute iterations identically each time — as in the OpenMP
               examples this test derives from.  Confirm on the target runtime
               before reusing this kernel elsewhere. */
            float Cval = 0.0;

            for (int kblock = 0; kblock < K ; kblock += BLOCK_SIZE ) {

               /* Stage one tile of A and B, zero-filling past the edges. */
#pragma omp for collapse(2)
               for (int row=0 ; row < BLOCK_SIZE ; row++)
                  for (int col=0 ; col < BLOCK_SIZE ; col++) {
                     C_row = C_row_start + row;
                     C_col = C_col_start + col;
                     if ((C_row < M) && (kblock + col < K))
                        As[row][col] = A[(C_row*LDA)+ kblock + col];
                     else
                        As[row][col] = 0;
                     if ((kblock + row < K) && C_col < N)
                        Bs[row][col] = B[((kblock+row)*LDB)+ C_col];
                     else
                        Bs[row][col] = 0;
                  }

               /* Accumulate this k-block's contribution. */
#pragma omp for collapse(2)
               for (int row=0 ; row < BLOCK_SIZE ; row++)
                  for (int col=0 ; col < BLOCK_SIZE ; col++) {
                     for (int e = 0; e < BLOCK_SIZE; ++e)
                        Cval += As[row][e] * Bs[e][col];
                  }

            } /* End for kblock .. */

            /* Write the finished tile back, skipping padding cells. */
#pragma omp for collapse(2)
            for (int row=0 ; row < BLOCK_SIZE ; row++)
               for (int col=0 ; col < BLOCK_SIZE ; col++) {
                  C_row = C_row_start + row;
                  C_col = C_col_start + col;
                  if ((C_row < M) && (C_col < N))
                     C[(C_row*LDC)+C_col] = alpha*Cval + beta*C[(C_row*LDC)+C_col];
               }

         } /* end parallel */
      }  /* end target teams distribute */
}